diff --git a/[refs] b/[refs] index 21b72c1a73b4..1c8957d8cb52 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a20da984fb5ddedb9e8e699c04e10fe0ca609440 +refs/heads/master: 0e0c3408a5414d4e1f8ca7fadcb513c13bd747e8 diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 13f472188e32..5d72dd57b78a 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -7454,7 +7454,8 @@ S: Maintained F: drivers/net/ethernet/dec/tulip/ TUN/TAP driver -M: Maxim Krasnyansky +M: Maxim Krasnyansky +L: vtun@office.satix.net W: http://vtun.sourceforge.net/tun S: Maintained F: Documentation/networking/tuntap.txt diff --git a/trunk/drivers/isdn/mISDN/tei.c b/trunk/drivers/isdn/mISDN/tei.c index 592f597d8951..be88728f1106 100644 --- a/trunk/drivers/isdn/mISDN/tei.c +++ b/trunk/drivers/isdn/mISDN/tei.c @@ -250,7 +250,7 @@ tei_debug(struct FsmInst *fi, char *fmt, ...) static int get_free_id(struct manager *mgr) { - DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 }; + u64 ids = 0; int i; struct layer2 *l2; @@ -261,11 +261,11 @@ get_free_id(struct manager *mgr) __func__); return -EBUSY; } - __set_bit(l2->ch.nr, ids); + test_and_set_bit(l2->ch.nr, (u_long *)&ids); } - i = find_next_zero_bit(ids, 64, 1); - if (i < 64) - return i; + for (i = 1; i < 64; i++) + if (!test_bit(i, (u_long *)&ids)) + return i; printk(KERN_WARNING "%s: more as 63 layer2 for one device\n", __func__); return -EBUSY; @@ -274,7 +274,7 @@ get_free_id(struct manager *mgr) static int get_free_tei(struct manager *mgr) { - DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 }; + u64 ids = 0; int i; struct layer2 *l2; @@ -288,11 +288,11 @@ get_free_tei(struct manager *mgr) continue; i -= 64; - __set_bit(i, ids); + test_and_set_bit(i, (u_long *)&ids); } - i = find_first_zero_bit(ids, 64); - if (i < 64) - return i + 64; + for (i = 0; i < 64; i++) + if (!test_bit(i, (u_long *)&ids)) + return i + 64; printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n", __func__); return -1; diff --git a/trunk/drivers/net/bonding/bond_alb.c b/trunk/drivers/net/bonding/bond_alb.c index 7c9d136e74be..e15cc11edbbe 100644 --- a/trunk/drivers/net/bonding/bond_alb.c +++ b/trunk/drivers/net/bonding/bond_alb.c @@ -84,10 +84,6 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); -static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); -static void rlb_src_unlink(struct bonding *bond, u32 index); -static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, - u32 ip_dst_hash); static inline u8 _simple_hash(const u8 *hash_start, int hash_size) { @@ -358,18 +354,6 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond, if (!arp) goto out; - /* We received an ARP from arp->ip_src. - * We might have used this IP address previously (on the bonding host - * itself or on a system that is bridged together with the bond). - * However, if arp->mac_src is different than what is stored in - * rx_hashtbl, some other host is now using the IP and we must prevent - * sending out client updates with this IP address and the old MAC - * address. - * Clean up all hash table entries that have this address as ip_src but - * have a different mac_src. 
- */ - rlb_purge_src_ip(bond, arp); - if (arp->op_code == htons(ARPOP_REPLY)) { /* update rx hash table for this ARP */ rlb_update_entry_from_arp(bond, arp); @@ -448,9 +432,9 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) _lock_rx_hashtbl_bh(bond); rx_hash_table = bond_info->rx_hashtbl; - index = bond_info->rx_hashtbl_used_head; + index = bond_info->rx_hashtbl_head; for (; index != RLB_NULL_INDEX; index = next_index) { - next_index = rx_hash_table[index].used_next; + next_index = rx_hash_table[index].next; if (rx_hash_table[index].slave == slave) { struct slave *assigned_slave = rlb_next_rx_slave(bond); @@ -535,9 +519,8 @@ static void rlb_update_rx_clients(struct bonding *bond) _lock_rx_hashtbl_bh(bond); - hash_index = bond_info->rx_hashtbl_used_head; - for (; hash_index != RLB_NULL_INDEX; - hash_index = client_info->used_next) { + hash_index = bond_info->rx_hashtbl_head; + for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if (client_info->ntt) { rlb_update_client(client_info); @@ -565,9 +548,8 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla _lock_rx_hashtbl_bh(bond); - hash_index = bond_info->rx_hashtbl_used_head; - for (; hash_index != RLB_NULL_INDEX; - hash_index = client_info->used_next) { + hash_index = bond_info->rx_hashtbl_head; + for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && @@ -596,9 +578,8 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) _lock_rx_hashtbl(bond); - hash_index = bond_info->rx_hashtbl_used_head; - for (; hash_index != RLB_NULL_INDEX; - hash_index = client_info->used_next) { + hash_index = bond_info->rx_hashtbl_head; + for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if (!client_info->slave) { @@ -644,7 +625,6 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon /* update mac address from arp */ memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); } - memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN); assigned_slave = client_info->slave; if (assigned_slave) { @@ -667,17 +647,6 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon assigned_slave = rlb_next_rx_slave(bond); if (assigned_slave) { - if (!(client_info->assigned && - client_info->ip_src == arp->ip_src)) { - /* ip_src is going to be updated, - * fix the src hash list - */ - u32 hash_src = _simple_hash((u8 *)&arp->ip_src, - sizeof(arp->ip_src)); - rlb_src_unlink(bond, hash_index); - rlb_src_link(bond, hash_src, hash_index); - } - client_info->ip_src = arp->ip_src; client_info->ip_dst = arp->ip_dst; /* arp->mac_dst is broadcast for arp reqeusts. @@ -685,7 +654,6 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon * upon receiving an arp reply. 
*/ memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); - memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN); client_info->slave = assigned_slave; if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { @@ -701,11 +669,11 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon } if (!client_info->assigned) { - u32 prev_tbl_head = bond_info->rx_hashtbl_used_head; - bond_info->rx_hashtbl_used_head = hash_index; - client_info->used_next = prev_tbl_head; + u32 prev_tbl_head = bond_info->rx_hashtbl_head; + bond_info->rx_hashtbl_head = hash_index; + client_info->next = prev_tbl_head; if (prev_tbl_head != RLB_NULL_INDEX) { - bond_info->rx_hashtbl[prev_tbl_head].used_prev = + bond_info->rx_hashtbl[prev_tbl_head].prev = hash_index; } client_info->assigned = 1; @@ -726,12 +694,6 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) struct arp_pkt *arp = arp_pkt(skb); struct slave *tx_slave = NULL; - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). - */ - if (!bond_slave_has_mac(bond, arp->mac_src)) - return NULL; - if (arp->op_code == htons(ARPOP_REPLY)) { /* the arp must be sent on the selected * rx channel @@ -778,9 +740,8 @@ static void rlb_rebalance(struct bonding *bond) _lock_rx_hashtbl_bh(bond); ntt = 0; - hash_index = bond_info->rx_hashtbl_used_head; - for (; hash_index != RLB_NULL_INDEX; - hash_index = client_info->used_next) { + hash_index = bond_info->rx_hashtbl_head; + for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); assigned_slave = rlb_next_rx_slave(bond); if (assigned_slave && (client_info->slave != assigned_slave)) { @@ -798,113 +759,11 @@ static void rlb_rebalance(struct bonding *bond) } /* Caller must hold rx_hashtbl lock */ -static void rlb_init_table_entry_dst(struct rlb_client_info *entry) -{ - entry->used_next = RLB_NULL_INDEX; - entry->used_prev = RLB_NULL_INDEX; - entry->assigned = 0; - entry->slave = NULL; - entry->tag = 0; -} -static void rlb_init_table_entry_src(struct rlb_client_info *entry) -{ - entry->src_first = RLB_NULL_INDEX; - entry->src_prev = RLB_NULL_INDEX; - entry->src_next = RLB_NULL_INDEX; -} - static void rlb_init_table_entry(struct rlb_client_info *entry) { memset(entry, 0, sizeof(struct rlb_client_info)); - rlb_init_table_entry_dst(entry); - rlb_init_table_entry_src(entry); -} - -static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - u32 next_index = bond_info->rx_hashtbl[index].used_next; - u32 prev_index = bond_info->rx_hashtbl[index].used_prev; - - if (index == bond_info->rx_hashtbl_used_head) - bond_info->rx_hashtbl_used_head = next_index; - if (prev_index != RLB_NULL_INDEX) - bond_info->rx_hashtbl[prev_index].used_next = next_index; - if (next_index != RLB_NULL_INDEX) - bond_info->rx_hashtbl[next_index].used_prev = prev_index; -} - -/* unlink a rlb hash table entry from the src list */ -static void rlb_src_unlink(struct bonding *bond, u32 index) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - u32 next_index = bond_info->rx_hashtbl[index].src_next; - u32 prev_index = bond_info->rx_hashtbl[index].src_prev; - - bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX; - bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX; - - if (next_index != RLB_NULL_INDEX) - bond_info->rx_hashtbl[next_index].src_prev = prev_index; - - if (prev_index == RLB_NULL_INDEX) - return; - - /* 
is prev_index pointing to the head of this list? */ - if (bond_info->rx_hashtbl[prev_index].src_first == index) - bond_info->rx_hashtbl[prev_index].src_first = next_index; - else - bond_info->rx_hashtbl[prev_index].src_next = next_index; - -} - -static void rlb_delete_table_entry(struct bonding *bond, u32 index) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]); - - rlb_delete_table_entry_dst(bond, index); - rlb_init_table_entry_dst(entry); - - rlb_src_unlink(bond, index); -} - -/* add the rx_hashtbl[ip_dst_hash] entry to the list - * of entries with identical ip_src_hash - */ -static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - u32 next; - - bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash; - next = bond_info->rx_hashtbl[ip_src_hash].src_first; - bond_info->rx_hashtbl[ip_dst_hash].src_next = next; - if (next != RLB_NULL_INDEX) - bond_info->rx_hashtbl[next].src_prev = ip_dst_hash; - bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash; -} - -/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does - * not match arp->mac_src */ -static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) -{ - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src)); - u32 index; - - _lock_rx_hashtbl_bh(bond); - - index = bond_info->rx_hashtbl[ip_src_hash].src_first; - while (index != RLB_NULL_INDEX) { - struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]); - u32 next_index = entry->src_next; - if (entry->ip_src == arp->ip_src && - !ether_addr_equal_64bits(arp->mac_src, entry->mac_src)) - rlb_delete_table_entry(bond, index); - index = next_index; - } - _unlock_rx_hashtbl_bh(bond); + entry->next = RLB_NULL_INDEX; + entry->prev = RLB_NULL_INDEX; } static int rlb_initialize(struct bonding *bond) @@ -922,7 +781,7 @@ static int rlb_initialize(struct bonding *bond) bond_info->rx_hashtbl = new_hashtbl; - bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; + bond_info->rx_hashtbl_head = RLB_NULL_INDEX; for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) { rlb_init_table_entry(bond_info->rx_hashtbl + i); @@ -944,7 +803,7 @@ static void rlb_deinitialize(struct bonding *bond) kfree(bond_info->rx_hashtbl); bond_info->rx_hashtbl = NULL; - bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; + bond_info->rx_hashtbl_head = RLB_NULL_INDEX; _unlock_rx_hashtbl_bh(bond); } @@ -956,13 +815,25 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) _lock_rx_hashtbl_bh(bond); - curr_index = bond_info->rx_hashtbl_used_head; + curr_index = bond_info->rx_hashtbl_head; while (curr_index != RLB_NULL_INDEX) { struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]); - u32 next_index = bond_info->rx_hashtbl[curr_index].used_next; + u32 next_index = bond_info->rx_hashtbl[curr_index].next; + u32 prev_index = bond_info->rx_hashtbl[curr_index].prev; + + if (curr->tag && (curr->vlan_id == vlan_id)) { + if (curr_index == bond_info->rx_hashtbl_head) { + bond_info->rx_hashtbl_head = next_index; + } + if (prev_index != RLB_NULL_INDEX) { + bond_info->rx_hashtbl[prev_index].next = next_index; + } + if (next_index != RLB_NULL_INDEX) { + bond_info->rx_hashtbl[next_index].prev = prev_index; + } - if (curr->tag && (curr->vlan_id == vlan_id)) - rlb_delete_table_entry(bond, curr_index); + rlb_init_table_entry(curr); + } curr_index = 
next_index; } diff --git a/trunk/drivers/net/bonding/bond_alb.h b/trunk/drivers/net/bonding/bond_alb.h index e7a5b8b37ea3..90f140a2d197 100644 --- a/trunk/drivers/net/bonding/bond_alb.h +++ b/trunk/drivers/net/bonding/bond_alb.h @@ -94,35 +94,15 @@ struct tlb_client_info { /* ------------------------------------------------------------------------- * struct rlb_client_info contains all info related to a specific rx client - * connection. This is the Clients Hash Table entry struct. - * Note that this is not a proper hash table; if a new client's IP address - * hash collides with an existing client entry, the old entry is replaced. - * - * There is a linked list (linked by the used_next and used_prev members) - * linking all the used entries of the hash table. This allows updating - * all the clients without walking over all the unused elements of the table. - * - * There are also linked lists of entries with identical hash(ip_src). These - * allow cleaning up the table from ip_src<->mac_src associations that have - * become outdated and would cause sending out invalid ARP updates to the - * network. These are linked by the (src_next and src_prev members). + * connection. This is the Clients Hash Table entry struct * ------------------------------------------------------------------------- */ struct rlb_client_info { __be32 ip_src; /* the server IP address */ __be32 ip_dst; /* the client IP address */ - u8 mac_src[ETH_ALEN]; /* the server MAC address */ u8 mac_dst[ETH_ALEN]; /* the client MAC address */ - - /* list of used hash table entries, starting at rx_hashtbl_used_head */ - u32 used_next; - u32 used_prev; - - /* ip_src based hashing */ - u32 src_next; /* next entry with same hash(ip_src) */ - u32 src_prev; /* prev entry with same hash(ip_src) */ - u32 src_first; /* first entry with hash(ip_src) == this entry's index */ - + u32 next; /* The next Hash table entry index */ + u32 prev; /* The previous Hash table entry index */ u8 assigned; /* checking whether this entry is assigned */ u8 ntt; /* flag - need to transmit client info */ struct slave *slave; /* the slave assigned to this client */ @@ -151,7 +131,7 @@ struct alb_bond_info { int rlb_enabled; struct rlb_client_info *rx_hashtbl; /* Receive hash table */ spinlock_t rx_hashtbl_lock; - u32 rx_hashtbl_used_head; + u32 rx_hashtbl_head; u8 rx_ntt; /* flag - need to transmit * to all rx clients */ diff --git a/trunk/drivers/net/bonding/bond_debugfs.c b/trunk/drivers/net/bonding/bond_debugfs.c index 5fc4c2351478..2cf084eb9d52 100644 --- a/trunk/drivers/net/bonding/bond_debugfs.c +++ b/trunk/drivers/net/bonding/bond_debugfs.c @@ -31,9 +31,8 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); - hash_index = bond_info->rx_hashtbl_used_head; - for (; hash_index != RLB_NULL_INDEX; - hash_index = client_info->used_next) { + hash_index = bond_info->rx_hashtbl_head; + for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { client_info = &(bond_info->rx_hashtbl[hash_index]); seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", &client_info->ip_src, diff --git a/trunk/drivers/net/bonding/bonding.h b/trunk/drivers/net/bonding/bonding.h index 6dded569b111..f8af2fcd3d16 100644 --- a/trunk/drivers/net/bonding/bonding.h +++ b/trunk/drivers/net/bonding/bonding.h @@ -22,7 +22,6 @@ #include #include #include -#include #include "bond_3ad.h" #include "bond_alb.h" @@ -451,18 +450,6 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn) } #endif -static inline struct slave 
*bond_slave_has_mac(struct bonding *bond, - const u8 *mac) -{ - int i = 0; - struct slave *tmp; - - bond_for_each_slave(bond, tmp, i) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} /* exported from bond_main.c */ extern int bond_net_id; diff --git a/trunk/drivers/net/can/dev.c b/trunk/drivers/net/can/dev.c index 8233e5ed2939..963e2ccd10db 100644 --- a/trunk/drivers/net/can/dev.c +++ b/trunk/drivers/net/can/dev.c @@ -609,7 +609,8 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - del_timer_sync(&priv->restart_timer); + if (del_timer_sync(&priv->restart_timer)) + dev_put(dev); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/trunk/drivers/net/can/mscan/mscan.c b/trunk/drivers/net/can/mscan/mscan.c index e6b40954e204..2b104d5f422c 100644 --- a/trunk/drivers/net/can/mscan/mscan.c +++ b/trunk/drivers/net/can/mscan/mscan.c @@ -517,8 +517,12 @@ static irqreturn_t mscan_isr(int irq, void *dev_id) static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) { + struct mscan_priv *priv = netdev_priv(dev); int ret = 0; + if (!priv->open_time) + return -EINVAL; + switch (mode) { case CAN_MODE_START: ret = mscan_restart(dev); @@ -586,6 +590,8 @@ static int mscan_open(struct net_device *dev) goto exit_napi_disable; } + priv->open_time = jiffies; + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) setbits8(®s->canctl1, MSCAN_LISTEN); else @@ -600,6 +606,7 @@ static int mscan_open(struct net_device *dev) return 0; exit_free_irq: + priv->open_time = 0; free_irq(dev->irq, dev); exit_napi_disable: napi_disable(&priv->napi); @@ -620,6 +627,7 @@ static int mscan_close(struct net_device *dev) mscan_set_mode(dev, MSCAN_INIT_MODE); close_candev(dev); free_irq(dev->irq, dev); + priv->open_time = 0; return 0; } diff --git a/trunk/drivers/net/can/mscan/mscan.h b/trunk/drivers/net/can/mscan/mscan.h index af2ed8baf0a3..b43e9f5d3268 100644 --- a/trunk/drivers/net/can/mscan/mscan.h +++ b/trunk/drivers/net/can/mscan/mscan.h @@ -281,6 +281,7 @@ struct tx_queue_entry { struct mscan_priv { struct can_priv can; /* must be the first member */ unsigned int type; /* MSCAN type variants */ + long open_time; unsigned long flags; void __iomem *reg_base; /* ioremap'ed address to registers */ u8 shadow_statflg; diff --git a/trunk/drivers/net/can/sja1000/sja1000.c b/trunk/drivers/net/can/sja1000/sja1000.c index 83ee11eca0e2..25011dbe1b96 100644 --- a/trunk/drivers/net/can/sja1000/sja1000.c +++ b/trunk/drivers/net/can/sja1000/sja1000.c @@ -188,6 +188,11 @@ static void sja1000_start(struct net_device *dev) static int sja1000_set_mode(struct net_device *dev, enum can_mode mode) { + struct sja1000_priv *priv = netdev_priv(dev); + + if (!priv->open_time) + return -EINVAL; + switch (mode) { case CAN_MODE_START: sja1000_start(dev); @@ -574,6 +579,7 @@ static int sja1000_open(struct net_device *dev) /* init and start chi */ sja1000_start(dev); + priv->open_time = jiffies; netif_start_queue(dev); @@ -592,6 +598,8 @@ static int sja1000_close(struct net_device *dev) close_candev(dev); + priv->open_time = 0; + return 0; } diff --git a/trunk/drivers/net/can/sja1000/sja1000.h b/trunk/drivers/net/can/sja1000/sja1000.h index afa99847a510..23fff06875f5 100644 --- a/trunk/drivers/net/can/sja1000/sja1000.h +++ b/trunk/drivers/net/can/sja1000/sja1000.h @@ -152,6 +152,7 @@ */ struct sja1000_priv { struct can_priv can; /* must be the first member */ + int open_time; struct sk_buff *echo_skb; /* the lower-layer is responsible for 
appropriate locking */ diff --git a/trunk/drivers/net/can/usb/ems_usb.c b/trunk/drivers/net/can/usb/ems_usb.c index c69f0b72b352..086fa321677a 100644 --- a/trunk/drivers/net/can/usb/ems_usb.c +++ b/trunk/drivers/net/can/usb/ems_usb.c @@ -245,6 +245,7 @@ struct ems_tx_urb_context { struct ems_usb { struct can_priv can; /* must be the first member */ + int open_time; struct sk_buff *echo_skb[MAX_TX_URBS]; @@ -727,6 +728,7 @@ static int ems_usb_open(struct net_device *netdev) return err; } + dev->open_time = jiffies; netif_start_queue(netdev); @@ -876,6 +878,8 @@ static int ems_usb_close(struct net_device *netdev) close_candev(netdev); + dev->open_time = 0; + return 0; } @@ -901,6 +905,9 @@ static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct ems_usb *dev = netdev_priv(netdev); + if (!dev->open_time) + return -EINVAL; + switch (mode) { case CAN_MODE_START: if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL)) diff --git a/trunk/drivers/net/can/usb/esd_usb2.c b/trunk/drivers/net/can/usb/esd_usb2.c index 9b74d1e3ad44..124e0dd3490c 100644 --- a/trunk/drivers/net/can/usb/esd_usb2.c +++ b/trunk/drivers/net/can/usb/esd_usb2.c @@ -217,6 +217,7 @@ struct esd_usb2_net_priv { struct usb_anchor tx_submitted; struct esd_tx_urb_context tx_contexts[MAX_TX_URBS]; + int open_time; struct esd_usb2 *usb2; struct net_device *netdev; int index; @@ -694,6 +695,8 @@ static int esd_usb2_open(struct net_device *netdev) return err; } + priv->open_time = jiffies; + netif_start_queue(netdev); return 0; @@ -861,6 +864,8 @@ static int esd_usb2_close(struct net_device *netdev) close_candev(netdev); + priv->open_time = 0; + return 0; } @@ -936,6 +941,11 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev, static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode) { + struct esd_usb2_net_priv *priv = netdev_priv(netdev); + + if (!priv->open_time) + return -EINVAL; + switch (mode) { case CAN_MODE_START: netif_wake_queue(netdev); diff --git a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.c index d9290ea788e0..c4643c400d46 100644 --- a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -520,6 +520,7 @@ static int peak_usb_ndo_open(struct net_device *netdev) return err; } + dev->open_time = jiffies; netif_start_queue(netdev); return 0; @@ -575,6 +576,7 @@ static int peak_usb_ndo_stop(struct net_device *netdev) close_candev(netdev); + dev->open_time = 0; dev->can.state = CAN_STATE_STOPPED; /* can set bus off now */ @@ -659,6 +661,9 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode) struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; + if (!dev->open_time) + return -EINVAL; + switch (mode) { case CAN_MODE_START: err = peak_usb_restart(dev); diff --git a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 073b47ff8eee..c8e5e91d7cb5 100644 --- a/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/trunk/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -104,6 +104,7 @@ struct peak_usb_device { struct can_priv can; struct peak_usb_adapter *adapter; unsigned int ctrl_idx; + int open_time; u32 state; struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS]; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/trunk/drivers/net/ethernet/intel/e1000e/80003es2lan.c index e73c2c355993..4dd18a1f45d2 100644 --- 
a/trunk/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -26,7 +26,8 @@ *******************************************************************************/ -/* 80003ES2LAN Gigabit Ethernet Controller (Copper) +/* + * 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) */ @@ -79,8 +80,7 @@ 1 = 50-80M 2 = 80-110M 3 = 110-140M - 4 = >140M - */ + 4 = >140M */ /* Kumeran Mode Control Register (Page 193, Register 16) */ #define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 @@ -95,7 +95,8 @@ /* In-Band Control Register (Page 194, Register 18) */ #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ -/* A table for the GG82563 cable length where the range is defined +/* + * A table for the GG82563 cable length where the range is defined * with a lower bound at "index" and the upper bound at * "index + 5". */ @@ -182,7 +183,8 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); - /* Added to a constant, "size" becomes the left-shift value + /* + * Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; @@ -373,7 +375,8 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) if (!(swfw_sync & (fwmask | swmask))) break; - /* Firmware currently using resource (fwmask) + /* + * Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ e1000e_put_hw_semaphore(hw); @@ -439,7 +442,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { - /* Use Alternative Page Select register to access + /* + * Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; @@ -453,7 +457,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, } if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { - /* The "ready" bit in the MDIC register may be incorrectly set + /* + * The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ @@ -508,7 +513,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { page_select = GG82563_PHY_PAGE_SELECT; } else { - /* Use Alternative Page Select register to access + /* + * Use Alternative Page Select register to access * registers 30 and 31 */ page_select = GG82563_PHY_PAGE_SELECT_ALT; @@ -522,7 +528,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, } if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { - /* The "ready" bit in the MDIC register may be incorrectly set + /* + * The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ @@ -611,7 +618,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) u16 phy_data; bool link; - /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. 
*/ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -649,7 +657,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) return ret_val; if (!link) { - /* We didn't get link. + /* + * We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1000e_phy_reset_dsp(hw); @@ -668,7 +677,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Resetting the phy means we need to verify the TX_CLK corresponds + /* + * Resetting the phy means we need to verify the TX_CLK corresponds * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. */ phy_data &= ~GG82563_MSCR_TX_CLK_MASK; @@ -677,7 +687,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) else phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; - /* In addition, we must re-enable CRS on Tx for both half and full + /* + * In addition, we must re-enable CRS on Tx for both half and full * duplex. */ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; @@ -755,7 +766,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) s32 ret_val; u16 kum_reg_data; - /* Prevent the PCI-E bus from sticking if there is no TLP connection + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000e_disable_pcie_master(hw); @@ -887,7 +899,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) hw->dev_spec.e80003es2lan.mdic_wa_enable = false; } - /* Clear all of the statistics registers (clear on read). It is + /* + * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. @@ -932,7 +945,8 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) reg |= (1 << 28); ew32(TARC(1), reg); - /* Disable IPv6 extension header parsing because some malformed + /* + * Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ reg = er32(RFCTL); @@ -965,7 +979,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Options: + /* + * Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode @@ -991,7 +1006,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) break; } - /* Options: + /* + * Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled @@ -1049,7 +1065,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Do not init these registers when the HW is in IAMT mode, since the + /* + * Do not init these registers when the HW is in IAMT mode, since the * firmware will have already initialized them. We only initialize * them if the HW is not in IAMT mode. */ @@ -1070,7 +1087,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) return ret_val; } - /* Workaround: Disable padding in Kumeran interface in the MAC + /* + * Workaround: Disable padding in Kumeran interface in the MAC * and in the PHY to avoid CRC errors. 
*/ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); @@ -1103,7 +1121,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); - /* Set the mac to wait the maximum time between each + /* + * Set the mac to wait the maximum time between each * iteration and increase the max iterations when * polling the phy; this fixes erroneous timeouts at 10Mbps. */ @@ -1333,7 +1352,8 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) { s32 ret_val = 0; - /* If there's an alternate MAC address place it in RAR0 + /* + * If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ diff --git a/trunk/drivers/net/ethernet/intel/e1000e/82571.c b/trunk/drivers/net/ethernet/intel/e1000e/82571.c index c77d010d5c59..c98586408005 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/82571.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/82571.c @@ -26,7 +26,8 @@ *******************************************************************************/ -/* 82571EB Gigabit Ethernet Controller +/* + * 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) * 82571EB Gigabit Ethernet Controller (Fiber) * 82571EB Dual Port Gigabit Mezzanine Adapter @@ -190,7 +191,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) if (((eecd >> 15) & 0x3) == 0x3) { nvm->type = e1000_nvm_flash_hw; nvm->word_size = 2048; - /* Autonomous Flash update bit must be cleared due + /* + * Autonomous Flash update bit must be cleared due * to Flash update issue. */ eecd &= ~E1000_EECD_AUPDEN; @@ -202,7 +204,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); - /* Added to a constant, "size" becomes the left-shift value + /* + * Added to a constant, "size" becomes the left-shift value * for setting word_size. */ size += NVM_WORD_SIZE_BASE_SHIFT; @@ -288,7 +291,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) /* FWSM register */ mac->has_fwsm = true; - /* ARC supported; valid only if manageability features are + /* + * ARC supported; valid only if manageability features are * enabled. */ mac->arc_subsystem_valid = !!(er32(FWSM) & @@ -310,7 +314,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) break; } - /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + /* + * Ensure that the inter-port SWSM.SMBI lock bit is clear before * first NVM or PHY access. This should be done for single-port * devices, and for one port only on dual-port devices so that * for those devices we can still use the SMBI lock to synchronize @@ -347,8 +352,11 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) ew32(SWSM, swsm & ~E1000_SWSM_SMBI); } - /* Initialize device specific counter of SMBI acquisition timeouts. */ - hw->dev_spec.e82571.smb_counter = 0; + /* + * Initialize device specific counter of SMBI acquisition + * timeouts. + */ + hw->dev_spec.e82571.smb_counter = 0; return 0; } @@ -437,7 +445,8 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - /* The 82571 firmware may still be configuring the PHY. + /* + * The 82571 firmware may still be configuring the PHY. * In this case, we cannot access the PHY until the * configuration is done. So we explicitly set the * PHY ID. 
@@ -483,7 +492,8 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) s32 fw_timeout = hw->nvm.word_size + 1; s32 i = 0; - /* If we have timedout 3 times on trying to acquire + /* + * If we have timedout 3 times on trying to acquire * the inter-port SMBI semaphore, there is old code * operating on the other port, and it is not * releasing SMBI. Modify the number of times that @@ -777,7 +787,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) if (ret_val) return ret_val; - /* If our nvm is an EEPROM, then we're done + /* + * If our nvm is an EEPROM, then we're done * otherwise, commit the checksum to the flash NVM. */ if (hw->nvm.type != e1000_nvm_flash_hw) @@ -795,7 +806,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) /* Reset the firmware if using STM opcode. */ if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { - /* The enabling of and the actual reset must be done + /* + * The enabling of and the actual reset must be done * in two write cycles. */ ew32(HICR, E1000_HICR_FW_RESET_ENABLE); @@ -855,7 +867,8 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, u32 i, eewr = 0; s32 ret_val = 0; - /* A check for invalid values: offset too large, too many words, + /* + * A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -944,7 +957,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. @@ -988,7 +1002,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; - /* Prevent the PCI-E bus from sticking if there is no TLP connection + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000e_disable_pcie_master(hw); @@ -1006,7 +1021,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) usleep_range(10000, 20000); - /* Must acquire the MDIO ownership before MAC reset. + /* + * Must acquire the MDIO ownership before MAC reset. * Ownership defaults to firmware after a reset. */ switch (hw->mac.type) { @@ -1051,7 +1067,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) /* We don't want to continue accessing MAC registers. */ return ret_val; - /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + /* + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. * Need to wait for Phy configuration completion before accessing * NVM and Phy. */ @@ -1059,7 +1076,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - /* REQ and GNT bits need to be cleared when using AUTO_RD + /* + * REQ and GNT bits need to be cleared when using AUTO_RD * to access the EEPROM. */ eecd = er32(EECD); @@ -1120,7 +1138,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) e_dbg("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); - /* Setup the receive address. + /* Setup the receive address. 
*/ + /* * If, however, a locally administered address was assigned to the * 82571, we must reserve a RAR for it to work around an issue where * resetting one port will reload the MAC on the other port. @@ -1164,7 +1183,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) break; } - /* Clear all of the statistics registers (clear on read). It is + /* + * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. @@ -1261,7 +1281,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) ew32(PBA_ECC, reg); } - /* Workaround for hardware errata. + /* + * Workaround for hardware errata. * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 */ if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { @@ -1270,7 +1291,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) ew32(CTRL_EXT, reg); } - /* Disable IPv6 extension header parsing because some malformed + /* + * Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ if (hw->mac.type <= e1000_82573) { @@ -1287,7 +1309,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) reg |= (1 << 22); ew32(GCR, reg); - /* Workaround for hardware errata. + /* + * Workaround for hardware errata. * apply workaround for hardware errata documented in errata * docs Fixes issue where some error prone or unreliable PCIe * completions are occurring, particularly with ASPM enabled. @@ -1321,7 +1344,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: if (hw->mng_cookie.vlan_id != 0) { - /* The VFTA is a 4096b bit-field, each identifying + /* + * The VFTA is a 4096b bit-field, each identifying * a single VLAN ID. The following operations * determine which 32b entry (i.e. offset) into the * array we want to set the VLAN ID (i.e. bit) of @@ -1338,7 +1362,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) break; } for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - /* If the offset we want to clear is the same offset of the + /* + * If the offset we want to clear is the same offset of the * manageability VLAN ID, then clear all bits except that of * the manageability unit. */ @@ -1376,7 +1401,8 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw) ctrl = hw->mac.ledctl_mode2; if (!(E1000_STATUS_LU & er32(STATUS))) { - /* If no link, then turn LED on by setting the invert bit + /* + * If no link, then turn LED on by setting the invert bit * for each LED that's "on" (0x0E) in ledctl_mode2. */ for (i = 0; i < 4; i++) @@ -1401,7 +1427,8 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) u16 receive_errors = 0; s32 ret_val = 0; - /* Read PHY Receive Error counter first, if its is max - all F's then + /* + * Read PHY Receive Error counter first, if its is max - all F's then * read the Base1000T status register If both are max then PHY is hung. */ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); @@ -1431,7 +1458,8 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) **/ static s32 e1000_setup_link_82571(struct e1000_hw *hw) { - /* 82573 does not have a word in the NVM to determine + /* + * 82573 does not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. 
*/ @@ -1498,7 +1526,8 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - /* If SerDes loopback mode is entered, there is no form + /* + * If SerDes loopback mode is entered, there is no form * of reset to take the adapter out of that mode. So we * have to explicitly take the adapter out of loopback * mode. This prevents drivers from twiddling their thumbs @@ -1555,7 +1584,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) switch (mac->serdes_link_state) { case e1000_serdes_link_autoneg_complete: if (!(status & E1000_STATUS_LU)) { - /* We have lost link, retry autoneg before + /* + * We have lost link, retry autoneg before * reporting link failure */ mac->serdes_link_state = @@ -1568,7 +1598,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) break; case e1000_serdes_link_forced_up: - /* If we are receiving /C/ ordered sets, re-enable + /* + * If we are receiving /C/ ordered sets, re-enable * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. @@ -1588,7 +1619,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) case e1000_serdes_link_autoneg_progress: if (rxcw & E1000_RXCW_C) { - /* We received /C/ ordered sets, meaning the + /* + * We received /C/ ordered sets, meaning the * link partner has autonegotiated, and we can * trust the Link Up (LU) status bit. */ @@ -1604,7 +1636,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) e_dbg("AN_PROG -> DOWN\n"); } } else { - /* The link partner did not autoneg. + /* + * The link partner did not autoneg. * Force link up and full duplex, and change * state to forced. */ @@ -1627,7 +1660,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) case e1000_serdes_link_down: default: - /* The link was down but the receiver has now gained + /* + * The link was down but the receiver has now gained * valid sync, so lets see if we can bring the link * up. */ @@ -1645,7 +1679,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) mac->serdes_link_state = e1000_serdes_link_down; e_dbg("ANYSTATE -> DOWN\n"); } else { - /* Check several times, if SYNCH bit and CONFIG + /* + * Check several times, if SYNCH bit and CONFIG * bit both are consistently 1 then simply ignore * the IV bit and restart Autoneg */ @@ -1745,7 +1780,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) /* If workaround is activated... */ if (state) - /* Hold a copy of the LAA in RAR[14] This is done so that + /* + * Hold a copy of the LAA in RAR[14] This is done so that * between the time RAR[0] gets clobbered and the time it * gets fixed, the actual LAA is in one of the RARs and no * incoming packets directed to this port are dropped. @@ -1774,7 +1810,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) if (nvm->type != e1000_nvm_flash_hw) return 0; - /* Check bit 4 of word 10h. If it is 0, firmware is done updating + /* + * Check bit 4 of word 10h. If it is 0, firmware is done updating * 10h-12h. Checksum may need to be fixed. */ ret_val = e1000_read_nvm(hw, 0x10, 1, &data); @@ -1782,7 +1819,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) return ret_val; if (!(data & 0x10)) { - /* Read 0x23 and check bit 15. This bit is a 1 + /* + * Read 0x23 and check bit 15. This bit is a 1 * when the checksum has already been fixed. 
If * the checksum is still wrong and this bit is a * 1, we need to return bad checksum. Otherwise, @@ -1814,7 +1852,8 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) if (hw->mac.type == e1000_82571) { s32 ret_val = 0; - /* If there's an alternate MAC address place it in RAR0 + /* + * If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ diff --git a/trunk/drivers/net/ethernet/intel/e1000e/defines.h b/trunk/drivers/net/ethernet/intel/e1000e/defines.h index 02a12b69555f..76edbc1be33b 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/defines.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/defines.h @@ -185,7 +185,8 @@ #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ -/* Use byte values for the following shift parameters +/* + * Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & * E1000_PSRCTL_BSIZE0_MASK) | @@ -241,7 +242,8 @@ #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ -/* Bit definitions for the Management Data IO (MDIO) and Management Data +/* + * Bit definitions for the Management Data IO (MDIO) and Management Data * Clock (MDC) pins in the Device Control Register. */ @@ -422,7 +424,8 @@ #define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ #define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ -/* This defines the bits that are set in the Interrupt Mask +/* + * This defines the bits that are set in the Interrupt Mask * Set/Read Register. Each bit is documented below: * o RXT0 = Receiver Timer Interrupt (ring 0) * o TXDW = Transmit Descriptor Written Back @@ -472,7 +475,8 @@ /* 802.1q VLAN Packet Size */ #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ -/* Receive Address +/* Receive Address */ +/* * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * Technically, we have 16 spots. However, we reserve one of these spots @@ -719,7 +723,8 @@ #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ #define MAX_PHY_MULTI_PAGE_REG 0xF -/* Bit definitions for valid PHY IDs. +/* Bit definitions for valid PHY IDs. 
*/ +/* * I = Integrated * E = External */ @@ -757,7 +762,8 @@ #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ #define M88E1000_PSCR_AUTO_X_MODE 0x0060 -/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) * 0=Normal 10BASE-T Rx Threshold */ #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ @@ -773,12 +779,14 @@ #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 -/* Number of times we will attempt to autonegotiate before downshifting if we +/* + * Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 -/* Number of times we will attempt to autonegotiate before downshifting if we +/* + * Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 @@ -800,7 +808,8 @@ #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ ((reg) & MAX_PHY_REG_ADDRESS)) -/* Bits... +/* + * Bits... * 15-5: page * 4-0: register offset */ diff --git a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h index 6782a2eea1bc..04668b47a1df 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h @@ -161,7 +161,8 @@ struct e1000_info; /* Time to wait before putting the device into D3 if there's no link (in ms). */ #define LINK_TIMEOUT 100 -/* Count for polling __E1000_RESET condition every 10-20msec. +/* + * Count for polling __E1000_RESET condition every 10-20msec. * Experimentation has shown the reset can take approximately 210msec. 
*/ #define E1000_CHECK_RESET_COUNT 25 @@ -171,7 +172,8 @@ struct e1000_info; #define BURST_RDTR 0x20 #define BURST_RADV 0x20 -/* in the case of WTHRESH, it appears at least the 82571/2 hardware +/* + * in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when * WTHRESH=4, so a setting of 5 gives the most efficient bus * utilization but to avoid possible Tx stalls, set it to 1 @@ -212,7 +214,8 @@ struct e1000_ps_page { u64 dma; /* must be u64 - written to hw */ }; -/* wrappers around a pointer to a socket buffer, +/* + * wrappers around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct e1000_buffer { @@ -302,7 +305,9 @@ struct e1000_adapter { u16 tx_itr; u16 rx_itr; - /* Tx */ + /* + * Tx + */ struct e1000_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; u32 tx_fifo_limit; @@ -335,7 +340,9 @@ struct e1000_adapter { u32 tx_fifo_size; u32 tx_dma_failed; - /* Rx */ + /* + * Rx + */ bool (*clean_rx) (struct e1000_ring *ring, int *work_done, int work_to_do) ____cacheline_aligned_in_smp; void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, diff --git a/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c b/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c index f95bc6ee1c22..c11ac2756667 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -214,8 +214,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) mac->autoneg = 0; /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work - */ + * for the switch() below to work */ if ((spd & 1) || (dplx & ~1)) goto err_inval; @@ -264,7 +263,8 @@ static int e1000_set_settings(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* When SoL/IDER sessions are active, autoneg/speed/duplex + /* + * When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (hw->phy.ops.check_reset_block && @@ -273,7 +273,8 @@ static int e1000_set_settings(struct net_device *netdev, return -EINVAL; } - /* MDI setting is only allowed when autoneg enabled because + /* + * MDI setting is only allowed when autoneg enabled because * some hardware doesn't allow MDI setting when speed or * duplex is forced. 
*/ @@ -315,7 +316,8 @@ static int e1000_set_settings(struct net_device *netdev, /* MDI-X => 2; MDI => 1; Auto => 3 */ if (ecmd->eth_tp_mdix_ctrl) { - /* fix up the value for auto (3 => 0) as zero is mapped + /* + * fix up the value for auto (3 => 0) as zero is mapped * internally to auto */ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) @@ -452,8 +454,8 @@ static void e1000_get_regs(struct net_device *netdev, regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ /* ethtool doesn't use anything past this point, so all this - * code is likely legacy junk for apps that may or may not exist - */ + * code is likely legacy junk for apps that may or may not + * exist */ if (hw->phy.type == e1000_phy_m88) { e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); regs_buff[13] = (u32)phy_data; /* cable length */ @@ -596,7 +598,8 @@ static int e1000_set_eeprom(struct net_device *netdev, if (ret_val) goto out; - /* Update the checksum over the first part of the EEPROM if needed + /* + * Update the checksum over the first part of the EEPROM if needed * and flush shadow RAM for applicable controllers */ if ((first_word <= NVM_CHECKSUM_REG) || @@ -620,7 +623,8 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, e1000e_driver_version, sizeof(drvinfo->version)); - /* EEPROM image version # is reported as firmware version # for + /* + * EEPROM image version # is reported as firmware version # for * PCI-E controllers */ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), @@ -704,7 +708,8 @@ static int e1000_set_ringparam(struct net_device *netdev, e1000e_down(adapter); - /* We can't just free everything and then setup again, because the + /* + * We can't just free everything and then setup again, because the * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring * structs. First, attempt to allocate new resources... */ @@ -808,7 +813,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) u32 mask; u32 wlock_mac = 0; - /* The status register is Read Only, so a write should fail. + /* + * The status register is Read Only, so a write should fail. * Some bits that get toggled are ignored. */ switch (mac->type) { @@ -990,7 +996,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) } if (!shared_int) { - /* Disable the interrupt to be reported in + /* + * Disable the interrupt to be reported in * the cause register and then force the same * interrupt and see if one gets posted. If * an interrupt was posted to the bus, the @@ -1008,7 +1015,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) } } - /* Enable the interrupt to be reported in + /* + * Enable the interrupt to be reported in * the cause register and then force the same * interrupt and see if one gets posted. If * an interrupt was not posted to the bus, the @@ -1026,7 +1034,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) } if (!shared_int) { - /* Disable the other interrupts to be reported in + /* + * Disable the other interrupts to be reported in * the cause register and then force the other * interrupts and see if any get posted. If * an interrupt was posted to the bus, the @@ -1369,7 +1378,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) hw->phy.type == e1000_phy_m88) { ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ } else { - /* Set the ILOS bit on the fiber Nic if half duplex link is + /* + * Set the ILOS bit on the fiber Nic if half duplex link is * detected. 
*/ if ((er32(STATUS) & E1000_STATUS_FD) == 0) @@ -1378,7 +1388,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) ew32(CTRL, ctrl_reg); - /* Disable the receiver on the PHY so when a cable is plugged in, the + /* + * Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. */ if (hw->phy.type == e1000_phy_m88) @@ -1397,7 +1408,8 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) /* special requirements for 82571/82572 fiber adapters */ - /* jump through hoops to make sure link is up because serdes + /* + * jump through hoops to make sure link is up because serdes * link is hardwired up */ ctrl |= E1000_CTRL_SLU; @@ -1417,7 +1429,8 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) ew32(CTRL, ctrl); } - /* special write to serdes control register to enable SerDes analog + /* + * special write to serdes control register to enable SerDes analog * loopback */ #define E1000_SERDES_LB_ON 0x410 @@ -1435,7 +1448,8 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) u32 ctrlext = er32(CTRL_EXT); u32 ctrl = er32(CTRL); - /* save CTRL_EXT to restore later, reuse an empty variable (unused + /* + * save CTRL_EXT to restore later, reuse an empty variable (unused * on mac_type 80003es2lan) */ adapter->tx_fifo_head = ctrlext; @@ -1571,7 +1585,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ew32(RDT(0), rx_ring->count - 1); - /* Calculate the loop count based on the largest descriptor ring + /* + * Calculate the loop count based on the largest descriptor ring * The idea is to wrap the largest ring a number of times using 64 * send/receive pairs during each loop */ @@ -1612,7 +1627,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) l++; if (l == rx_ring->count) l = 0; - /* time + 20 msecs (200 msecs on 2.4) is more than + /* + * time + 20 msecs (200 msecs on 2.4) is more than * enough time to complete the receives, if it's * exceeded, break and error off */ @@ -1633,7 +1649,10 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; - /* PHY loopback cannot be performed if SoL/IDER sessions are active */ + /* + * PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); @@ -1667,7 +1686,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) int i = 0; hw->mac.serdes_has_link = false; - /* On some blade server designs, link establishment + /* + * On some blade server designs, link establishment * could take as long as 2-3 minutes */ do { @@ -1681,7 +1701,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) } else { hw->mac.ops.check_for_link(hw); if (hw->mac.autoneg) - /* On some Phy/switch combinations, link establishment + /* + * On some Phy/switch combinations, link establishment * can take a few seconds more than expected. 
*/ msleep(5000); diff --git a/trunk/drivers/net/ethernet/intel/e1000e/hw.h b/trunk/drivers/net/ethernet/intel/e1000e/hw.h index cf217777586c..d37bfd96c987 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/hw.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/hw.h @@ -85,7 +85,8 @@ enum e1e_registers { E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ -/* Convenience macros +/* + * Convenience macros * * Note: "_n" is the queue number of the register to be written to. * @@ -799,7 +800,8 @@ struct e1000_mac_operations { s32 (*read_mac_addr)(struct e1000_hw *); }; -/* When to use various PHY register access functions: +/* + * When to use various PHY register access functions: * * Func Caller * Function Does Does When to use diff --git a/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c b/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c index 976336547607..e3a7b07df629 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -26,7 +26,8 @@ *******************************************************************************/ -/* 82562G 10/100 Network Connection +/* + * 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection * 82562GT 10/100 Network Connection * 82562GT-2 10/100 Network Connection @@ -353,7 +354,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) return true; } - /* In case the PHY needs to be in mdio slow mode, + /* + * In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ hw->phy.ops.release(hw); @@ -384,7 +386,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) return ret_val; } - /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + /* + * The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode. */ @@ -393,7 +396,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (e1000_phy_is_accessible_pchlan(hw)) break; - /* Before toggling LANPHYPC, see if PHY is accessible by + /* + * Before toggling LANPHYPC, see if PHY is accessible by * forcing MAC to SMBus mode first. */ mac_reg = er32(CTRL_EXT); @@ -402,7 +406,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: - /* Gate automatic PHY configuration by hardware on + /* + * Gate automatic PHY configuration by hardware on * non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && @@ -469,7 +474,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) hw->phy.ops.release(hw); - /* Reset the PHY before any access to it. Doing so, ensures + /* + * Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet. @@ -530,7 +536,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: - /* In case the PHY needs to be in mdio slow mode, + /* + * In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. 
*/ ret_val = e1000_set_mdio_slow_mode_hv(hw); @@ -586,7 +593,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; - /* We may need to do this twice - once for IGP and if that fails, + /* + * We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again */ ret_val = e1000e_determine_phy_address(hw); @@ -671,7 +679,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) gfpreg = er32flash(ICH_FLASH_GFPREG); - /* sector_X_addr is a "sector"-aligned address (4096 bytes) + /* + * sector_X_addr is a "sector"-aligned address (4096 bytes) * Add 1 to sector_end_addr since this sector is included in * the overall size. */ @@ -681,7 +690,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* flash_base_addr is byte-aligned */ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; - /* find total size of the NVM, then cut in half since the total + /* + * find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ nvm->flash_bank_size = (sector_end_addr - sector_base_addr) @@ -778,7 +788,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - /* Gate automatic PHY configuration by hardware on managed + /* + * Gate automatic PHY configuration by hardware on managed * 82579 and i217 */ if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && @@ -829,7 +840,8 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) goto release; e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); - /* EEE is not supported in 100Half, so ignore partner's EEE + /* + * EEE is not supported in 100Half, so ignore partner's EEE * in 100 ability if full-duplex is not advertised. */ e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); @@ -857,7 +869,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) bool link; u16 phy_reg; - /* We only want to go out to the PHY registers to see if Auto-Neg + /* + * We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. @@ -865,7 +878,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (!mac->get_link_status) return 0; - /* First we want to see if the MII Status Register reports + /* + * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ @@ -900,7 +914,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } - /* Workaround for PCHx parts in half-duplex: + /* + * Workaround for PCHx parts in half-duplex: * Set the number of preambles removed from the packet * when it is passed from the PHY to the MAC to prevent * the MAC from misinterpreting the packet type. 
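The e1000_init_nvm_params_ich8lan hunk above describes the flash geometry arithmetic only in prose. Here is a standalone sketch of that arithmetic under the assumptions its comments state (4-KB sectors, the end sector counted inclusively, and the region split into two equal banks); how the sector numbers are decoded out of GFPREG is not shown in the hunk, so they are plain parameters here, and all names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* 4 KB sectors, per the "sector-aligned address (4096 bytes)" comment;
 * shifting a sector number left by 12 therefore gives a byte address. */
#define FLASH_SECTOR_SHIFT      12

struct nvm_geometry {
        uint32_t flash_base_addr;       /* byte offset of the first NVM sector */
        uint32_t flash_bank_size;       /* size of ONE bank, in bytes */
};

/* sector_base_addr / sector_end_addr are the sector numbers decoded from
 * GFPREG; the register layout is not shown in the hunk, so they are plain
 * parameters, with sector_end_addr counted inclusively. */
static struct nvm_geometry compute_nvm_geometry(uint32_t sector_base_addr,
                                                uint32_t sector_end_addr)
{
        struct nvm_geometry g;
        uint32_t total;

        /* +1 because the end sector is included in the overall size */
        total = (sector_end_addr - sector_base_addr + 1) << FLASH_SECTOR_SHIFT;

        g.flash_base_addr = sector_base_addr << FLASH_SECTOR_SHIFT;
        /* the region holds two banks, so one bank is half of the total */
        g.flash_bank_size = total / 2;
        return g;
}

int main(void)
{
        /* sectors 1..8: base 0x1000, 8 sectors = 32 KB total, 16 KB per bank */
        struct nvm_geometry g = compute_nvm_geometry(1, 8);

        printf("base 0x%x, bank %u bytes\n",
               (unsigned)g.flash_base_addr, (unsigned)g.flash_bank_size);
        return 0;
}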
@@ -917,7 +932,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) break; } - /* Check if there was DownShift, must be checked + /* + * Check if there was DownShift, must be checked * immediately after link-up */ e1000e_check_downshift(hw); @@ -927,19 +943,22 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - /* If we are forcing speed/duplex, then we simply return since + /* + * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; - /* Auto-Neg is enabled. Auto Speed Detection takes care + /* + * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); - /* Configure Flow Control now that Auto-Neg has completed. + /* + * Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. @@ -981,7 +1000,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) if (rc) return rc; - /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or + /* + * Disable Jumbo Frame support on parts with Intel 10/100 PHY or * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). */ if ((adapter->hw.phy.type == e1000_phy_ife) || @@ -1171,7 +1191,8 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; - /* HW expects these in little endian so we reverse the byte order + /* + * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | @@ -1235,7 +1256,8 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) u32 rar_low, rar_high; u32 wlock_mac; - /* HW expects these in little endian so we reverse the byte order + /* + * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | @@ -1255,7 +1277,8 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) return; } - /* The manageability engine (ME) can lock certain SHRAR registers that + /* + * The manageability engine (ME) can lock certain SHRAR registers that * it is using - those registers are unavailable for use. */ if (index < hw->mac.rar_entry_count) { @@ -1364,7 +1387,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) s32 ret_val = 0; u16 word_addr, reg_data, reg_addr, phy_page = 0; - /* Initialize the PHY from the NVM on ICH platforms. This + /* + * Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is * not properly autoloaded after power transitions. 
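The two RAR-set hunks above (e1000_rar_set_pch2lan and e1000_rar_set_pch_lpt) explain the network-order to little-endian conversion, but the diff context cuts the expression short. Here is a standalone sketch of the packing; only the first shifts are visible in the hunk, so the split of bytes 4 and 5 into the high word is an assumption of the sketch, and the "address valid" bit handling is left out.

#include <stdint.h>
#include <stdio.h>

/* Pack a MAC address (network byte order, addr[0] first on the wire) into
 * the two little-endian RAR words the hardware expects. */
static void pack_rar(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
        *rar_low = ((uint32_t)addr[0]) |
                   ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) |
                   ((uint32_t)addr[3] << 24);
        *rar_high = ((uint32_t)addr[4]) |
                    ((uint32_t)addr[5] << 8);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t lo, hi;

        pack_rar(mac, &lo, &hi);
        /* prints lo=0xaa211b00 hi=0x0000ccbb */
        printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
        return 0;
}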
* Therefore, after each PHY reset, we will load the @@ -1398,7 +1422,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) if (!(data & sw_cfg_mask)) goto release; - /* Make sure HW does not configure LCD from PHY + /* + * Make sure HW does not configure LCD from PHY * extended configuration before SW configuration */ data = er32(EXTCNF_CTRL); @@ -1418,7 +1443,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) if (((hw->mac.type == e1000_pchlan) && !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || (hw->mac.type > e1000_pchlan)) { - /* HW configures the SMBus address and LEDs when the + /* + * HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead. @@ -1722,7 +1748,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) } if (hw->phy.type == e1000_phy_82578) { - /* Return registers to default by doing a soft reset then + /* + * Return registers to default by doing a soft reset then * writing 0x3140 to the control register. */ if (hw->phy.revision < 2) { @@ -1742,7 +1769,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Configure the K1 Si workaround during phy reset assuming there is + /* + * Configure the K1 Si workaround during phy reset assuming there is * link so that it disables K1 if link is in 1Gbps. */ ret_val = e1000_k1_gig_workaround_hv(hw, true); @@ -1825,7 +1853,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; if (enable) { - /* Write Rx addresses (rar_entry_count for RAL/H, +4 for + /* + * Write Rx addresses (rar_entry_count for RAL/H, +4 for * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { @@ -2102,7 +2131,8 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) udelay(100); } while ((!data) && --loop); - /* If basic configuration is incomplete before the above loop + /* + * If basic configuration is incomplete before the above loop * count reaches 0, loading the configuration from NVM will * leave the PHY in a bad state possibly resulting in no link. */ @@ -2269,7 +2299,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) if (phy->type != e1000_phy_igp_3) return 0; - /* Call gig speed drop workaround on LPLU before accessing + /* + * Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) @@ -2288,7 +2319,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) if (phy->type != e1000_phy_igp_3) return 0; - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. @@ -2350,7 +2382,8 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) if (phy->type != e1000_phy_igp_3) return 0; - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. 
@@ -2387,7 +2420,8 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) if (phy->type != e1000_phy_igp_3) return 0; - /* Call gig speed drop workaround on LPLU before accessing + /* + * Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) @@ -2555,7 +2589,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); - /* Either we should have a hardware SPI cycle in progress + /* + * Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or * FDONE bit should be changed in the hardware so that it * is 1 after hardware reset, which can then be used as an @@ -2564,7 +2599,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) */ if (!hsfsts.hsf_status.flcinprog) { - /* There is no cycle running at present, + /* + * There is no cycle running at present, * so we can start a cycle. * Begin by setting Flash Cycle Done. */ @@ -2574,7 +2610,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) } else { s32 i; - /* Otherwise poll for sometime so the current + /* + * Otherwise poll for sometime so the current * cycle has a chance to end before giving up. */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { @@ -2586,7 +2623,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) udelay(1); } if (!ret_val) { - /* Successful in waiting for previous cycle to timeout, + /* + * Successful in waiting for previous cycle to timeout, * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; @@ -2715,7 +2753,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT); - /* Check if FCERR is set to 1, if set to 1, clear it + /* + * Check if FCERR is set to 1, if set to 1, clear it * and try the whole sequence a few more times, else * read in (shift in) the Flash Data0, the order is * least significant byte first msb to lsb @@ -2728,7 +2767,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, *data = (u16)(flash_data & 0x0000FFFF); break; } else { - /* If we've gotten here, then things are probably + /* + * If we've gotten here, then things are probably * completely hosed, but if the error condition is * detected, it won't hurt to give it another try... * ICH_FLASH_CYCLE_REPEAT_COUNT times. @@ -2809,7 +2849,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) nvm->ops.acquire(hw); - /* We're writing to the opposite bank so if we're on bank 1, + /* + * We're writing to the opposite bank so if we're on bank 1, * write to bank 0 etc. We also need to erase the segment that * is going to be written */ @@ -2834,7 +2875,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) } for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { - /* Determine whether to write the value stored + /* + * Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ @@ -2848,7 +2890,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) break; } - /* If the word is 0x13, then make sure the signature bits + /* + * If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the * signature is valid. 
We want to do this after the write @@ -2877,7 +2920,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) break; } - /* Don't bother writing the segment valid bits if sector + /* + * Don't bother writing the segment valid bits if sector * programming failed. */ if (ret_val) { @@ -2886,7 +2930,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) goto release; } - /* Finally validate the new segment by setting bit 15:14 + /* + * Finally validate the new segment by setting bit 15:14 * to 10b in word 0x13 , this can be done without an * erase as well since these bits are 11 to start with * and we need to change bit 14 to 0b @@ -2903,7 +2948,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; - /* And invalidate the previously valid segment by setting + /* + * And invalidate the previously valid segment by setting * its signature word (0x13) high_byte to 0b. This can be * done without an erase because flash erase sets all bits * to 1's. We can write 1's to 0's without an erase @@ -2922,7 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) release: nvm->ops.release(hw); - /* Reload the EEPROM, or else modifications will not appear + /* + * Reload the EEPROM, or else modifications will not appear * until after the next adapter reset. */ if (!ret_val) { @@ -2950,7 +2997,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) s32 ret_val; u16 data; - /* Read 0x19 and check bit 6. If this bit is 0, the checksum + /* + * Read 0x19 and check bit 6. If this bit is 0, the checksum * needs to be fixed. This bit is an indication that the NVM * was prepared by OEM software and did not calculate the * checksum...a likely scenario. @@ -3000,7 +3048,8 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) pr0.range.wpe = true; ew32flash(ICH_FLASH_PR0, pr0.regval); - /* Lock down a subset of GbE Flash Control Registers, e.g. + /* + * Lock down a subset of GbE Flash Control Registers, e.g. * PR0 to prevent the write-protection from being lifted. * Once FLOCKDN is set, the registers protected by it cannot * be written until FLOCKDN is cleared by a hardware reset. @@ -3060,7 +3109,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ew32flash(ICH_FLASH_FDATA0, flash_data); - /* check if FCERR is set to 1 , if set to 1, clear it + /* + * check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ ret_val = e1000_flash_cycle_ich8lan(hw, @@ -3068,7 +3118,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, if (!ret_val) break; - /* If we're here, then things are most likely + /* + * If we're here, then things are most likely * completely hosed, but if the error condition * is detected, it won't hurt to give it another * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. @@ -3156,7 +3207,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); - /* Determine HW Sector size: Read BERASE bits of hw flash status + /* + * Determine HW Sector size: Read BERASE bits of hw flash status * register * 00: The Hw sector is 256 bytes, hence we need to erase 16 * consecutive sectors. 
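The update-NVM-checksum hunks above walk through the bank signature dance in words: keep bits 15:14 of word 0x13 at 11b while writing, drop them to 10b to validate, and later clear the old bank's high byte, relying on flash being able to clear bits without an erase. A small sketch of just those bit manipulations; the masks follow directly from the bit positions named in the comments, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* The signature lives in bits 15:14 of word 0x13 of each bank:
 * 11b = erased / being written, 10b = valid.  Flash can clear bits
 * (1 -> 0) without an erase, so both transitions below are plain writes. */
#define SIG_MASK        0xC000u
#define SIG_VALID       0x8000u         /* 10b */

/* Validate the freshly written bank: 11b -> 10b, i.e. clear bit 14 only. */
static uint16_t validate_bank_sig(uint16_t word_0x13)
{
        return (uint16_t)((word_0x13 & ~SIG_MASK) | SIG_VALID);
}

/* Invalidate the old bank by clearing the high byte of its word 0x13;
 * again only 1 -> 0 transitions, so no erase is needed. */
static uint16_t invalidate_bank_sig(uint16_t word_0x13)
{
        return (uint16_t)(word_0x13 & 0x00FF);
}

int main(void)
{
        uint16_t w = 0xC055;    /* 11b signature plus some payload bits */

        printf("validated:   0x%04x\n", (unsigned)validate_bank_sig(w));   /* 0x8055 */
        printf("invalidated: 0x%04x\n", (unsigned)invalidate_bank_sig(w)); /* 0x0055 */
        return 0;
}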
The start index for the nth Hw sector @@ -3201,14 +3253,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) if (ret_val) return ret_val; - /* Write a value 11 (block Erase) in Flash + /* + * Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); - /* Write the last 24 bits of an index within the + /* + * Write the last 24 bits of an index within the * block into Flash Linear address field in Flash * Address. */ @@ -3220,7 +3274,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) if (!ret_val) break; - /* Check if FCERR is set to 1. If 1, + /* + * Check if FCERR is set to 1. If 1, * clear it and try the whole sequence * a few more times else Done */ @@ -3348,7 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) ret_val = e1000e_get_bus_info_pcie(hw); - /* ICH devices are "PCI Express"-ish. They have + /* + * ICH devices are "PCI Express"-ish. They have * a configuration space, but do not contain * PCI Express Capability registers, so bus width * must be hardcoded. @@ -3373,7 +3429,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) u32 ctrl, reg; s32 ret_val; - /* Prevent the PCI-E bus from sticking if there is no TLP connection + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000e_disable_pcie_master(hw); @@ -3383,7 +3440,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) e_dbg("Masking off all interrupts\n"); ew32(IMC, 0xffffffff); - /* Disable the Transmit and Receive units. Then delay to allow + /* + * Disable the Transmit and Receive units. Then delay to allow * any pending transactions to complete before we hit the MAC * with the global reset. */ @@ -3416,13 +3474,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ctrl = er32(CTRL); if (!hw->phy.ops.check_reset_block(hw)) { - /* Full-chip reset requires MAC and PHY reset at the same + /* + * Full-chip reset requires MAC and PHY reset at the same * time to make sure the interface between MAC and the * external PHY is reset. */ ctrl |= E1000_CTRL_PHY_RST; - /* Gate automatic PHY configuration by hardware on + /* + * Gate automatic PHY configuration by hardware on * non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && @@ -3456,7 +3516,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) return ret_val; } - /* For PCH, this write will make sure that any noise + /* + * For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up * as a bad packet to the DMA engine. */ @@ -3508,7 +3569,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); - /* The 82578 Rx buffer will stall if wakeup is enabled in host and + /* + * The 82578 Rx buffer will stall if wakeup is enabled in host and * the ME. Disable wakeup by clearing the host wakeup bit. * Reset the phy after disabling host wakeup to reset the Rx buffer. */ @@ -3538,7 +3600,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) E1000_TXDCTL_MAX_TX_DESC_PREFETCH; ew32(TXDCTL(1), txdctl); - /* ICH8 has opposite polarity of no_snoop bits. + /* + * ICH8 has opposite polarity of no_snoop bits. * By default, we should use snoop behavior. 
*/ if (mac->type == e1000_ich8lan) @@ -3551,7 +3614,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) ctrl_ext |= E1000_CTRL_EXT_RO_DIS; ew32(CTRL_EXT, ctrl_ext); - /* Clear all of the statistics registers (clear on read). It is + /* + * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. @@ -3612,13 +3676,15 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(STATUS, reg); } - /* work-around descriptor data corruption issue during nfs v2 udp + /* + * work-around descriptor data corruption issue during nfs v2 udp * traffic, just disable the nfs filtering capability */ reg = er32(RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); - /* Disable IPv6 extension header parsing because some malformed + /* + * Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. */ if (hw->mac.type == e1000_ich8lan) @@ -3643,7 +3709,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) if (hw->phy.ops.check_reset_block(hw)) return 0; - /* ICH parts do not have a word in the NVM to determine + /* + * ICH parts do not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. */ @@ -3655,7 +3722,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) hw->fc.requested_mode = e1000_fc_full; } - /* Save off the requested flow control mode for use later. Depending + /* + * Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; @@ -3703,7 +3771,8 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ew32(CTRL, ctrl); - /* Set the mac to wait the maximum time between each iteration + /* + * Set the mac to wait the maximum time between each iteration * and increase the max iterations when polling the phy; * this fixes erroneous timeouts at 10Mbps. */ @@ -3823,7 +3892,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) if (!dev_spec->kmrn_lock_loss_workaround_enabled) return 0; - /* Make sure link is up before proceeding. If not just return. + /* + * Make sure link is up before proceeding. If not just return. * Attempting this while link is negotiating fouled up link * stability */ @@ -3855,7 +3925,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) E1000_PHY_CTRL_NOND0A_GBE_DISABLE); ew32(PHY_CTRL, phy_ctrl); - /* Call gig speed drop workaround on Gig disable before accessing + /* + * Call gig speed drop workaround on Gig disable before accessing * any PHY registers */ e1000e_gig_downshift_workaround_ich8lan(hw); @@ -3912,7 +3983,8 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) E1000_PHY_CTRL_NOND0A_GBE_DISABLE); ew32(PHY_CTRL, reg); - /* Call gig speed drop workaround on Gig disable before + /* + * Call gig speed drop workaround on Gig disable before * accessing any PHY registers */ if (hw->mac.type == e1000_ich8lan) @@ -4006,7 +4078,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) goto release; e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert); - /* Disable LPLU if both link partners support 100BaseT + /* + * Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the * link. 
*/ @@ -4018,7 +4091,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) E1000_PHY_CTRL_NOND0A_LPLU); } - /* For i217 Intel Rapid Start Technology support, + /* + * For i217 Intel Rapid Start Technology support, * when the system is going into Sx and no manageability engine * is present, the driver must configure proxy to reset only on * power good. LPI (Low Power Idle) state must also reset only @@ -4032,7 +4106,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); - /* Set bit enable LPI (EEE) to reset only on + /* + * Set bit enable LPI (EEE) to reset only on * power good. */ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); @@ -4045,7 +4120,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); } - /* Enable MTA to reset for Intel Rapid Start Technology + /* + * Enable MTA to reset for Intel Rapid Start Technology * Support */ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); @@ -4099,7 +4175,8 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) return; } - /* For i217 Intel Rapid Start Technology support when the system + /* + * For i217 Intel Rapid Start Technology support when the system * is transitioning from Sx and no manageability engine is present * configure SMBus to restore on reset, disable proxy, and enable * the reset on MTA (Multicast table array). @@ -4114,7 +4191,8 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) } if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - /* Restore clear on SMB if no manageability engine + /* + * Restore clear on SMB if no manageability engine * is present */ ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); @@ -4220,7 +4298,8 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) u16 data = (u16)hw->mac.ledctl_mode2; u32 i, led; - /* If no link, then turn LED on by setting the invert bit + /* + * If no link, then turn LED on by setting the invert bit * for each LED that's mode is "link_up" in ledctl_mode2. */ if (!(er32(STATUS) & E1000_STATUS_LU)) { @@ -4250,7 +4329,8 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) u16 data = (u16)hw->mac.ledctl_mode1; u32 i, led; - /* If no link, then turn LED off by clearing the invert bit + /* + * If no link, then turn LED off by clearing the invert bit * for each LED that's mode is "link_up" in ledctl_mode1. */ if (!(er32(STATUS) & E1000_STATUS_LU)) { @@ -4295,7 +4375,8 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) } else { ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) { - /* When auto config read does not complete, do not + /* + * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ diff --git a/trunk/drivers/net/ethernet/intel/e1000e/mac.c b/trunk/drivers/net/ethernet/intel/e1000e/mac.c index 54d9dafaf126..a13439928488 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/mac.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/mac.c @@ -73,7 +73,8 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) struct e1000_bus_info *bus = &hw->bus; u32 reg; - /* The status register reports the correct function number + /* + * The status register reports the correct function number * for the device regardless of function swap state. 
*/ reg = er32(STATUS); @@ -209,7 +210,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) return 0; } - /* We have a valid alternate MAC address, and we want to treat it the + /* + * We have a valid alternate MAC address, and we want to treat it the * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. */ @@ -231,7 +233,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; - /* HW expects these in little endian so we reverse the byte order + /* + * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | @@ -243,7 +246,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) if (rar_low || rar_high) rar_high |= E1000_RAH_AV; - /* Some bridges will combine consecutive 32-bit writes into + /* + * Some bridges will combine consecutive 32-bit writes into * a single burst write, which will malfunction on some parts. * The flushes avoid this. */ @@ -269,13 +273,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; - /* The portion of the address that is used for the hash table + /* + * The portion of the address that is used for the hash table * is determined by the mc_filter_type setting. * The algorithm is such that there is a total of 8 bits of shifting. * The bit_shift for a mc_filter_type of 0 represents the number of @@ -417,7 +423,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) s32 ret_val; bool link; - /* We only want to go out to the PHY registers to see if Auto-Neg + /* + * We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. @@ -425,7 +432,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) if (!mac->get_link_status) return 0; - /* First we want to see if the MII Status Register reports + /* + * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ @@ -438,24 +446,28 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) mac->get_link_status = false; - /* Check if there was DownShift, must be checked + /* + * Check if there was DownShift, must be checked * immediately after link-up */ e1000e_check_downshift(hw); - /* If we are forcing speed/duplex, then we simply return since + /* + * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; - /* Auto-Neg is enabled. Auto Speed Detection takes care + /* + * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); - /* Configure Flow Control now that Auto-Neg has completed. + /* + * Configure Flow Control now that Auto-Neg has completed. 
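The e1000_hash_mc_addr hunk above spells out how bit_shift is derived from the MTA size, but the rest of the hash is cut off by the diff context. Here is a standalone sketch of just the bit_shift calculation it describes; how the shifted address bytes are folded into the final hash value is not shown in the hunk and is therefore omitted.

#include <stdint.h>
#include <stdio.h>

/* The hash mask spans mta_reg_count * 32 filter bits, and for
 * mc_filter_type 0 the shift is the smallest value for which
 * hash_mask >> shift == 0xFF.  Assumes at least 8 MTA registers
 * (as on real parts) so the loop terminates. */
static unsigned int mta_bit_shift(unsigned int mta_reg_count)
{
        uint32_t hash_mask = mta_reg_count * 32 - 1;
        unsigned int bit_shift = 0;

        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;
        return bit_shift;
}

int main(void)
{
        /* 128 MTA registers -> 4096 bits -> mask 0xFFF -> shift 4
         *  32 MTA registers -> 1024 bits -> mask 0x3FF -> shift 2 */
        printf("128 regs: shift %u\n", mta_bit_shift(128));
        printf(" 32 regs: shift %u\n", mta_bit_shift(32));
        return 0;
}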
* First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. @@ -486,7 +498,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) status = er32(STATUS); rxcw = er32(RXCW); - /* If we don't have link (auto-negotiation failed or link partner + /* + * If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), the cable is plugged in (we have signal), * and our link partner is not trying to auto-negotiate with us (we * are receiving idles or data), we need to force link up. We also @@ -517,7 +530,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* If we are forcing link and we are receiving /C/ ordered + /* + * If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. @@ -551,7 +565,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) status = er32(STATUS); rxcw = er32(RXCW); - /* If we don't have link (auto-negotiation failed or link partner + /* + * If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), and our link partner is not trying to * auto-negotiate with us (we are receiving idles or data), * we need to force link up. We also need to give auto-negotiation @@ -580,7 +595,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { - /* If we are forcing link and we are receiving /C/ ordered + /* + * If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. @@ -591,7 +607,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) mac->serdes_has_link = true; } else if (!(E1000_TXCW_ANE & er32(TXCW))) { - /* If we force link for non-auto-negotiation switch, check + /* + * If we force link for non-auto-negotiation switch, check * link status based on MAC synchronization for internal * serdes media type. */ @@ -648,7 +665,8 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) s32 ret_val; u16 nvm_data; - /* Read and store word 0x0F of the EEPROM. This word contains bits + /* + * Read and store word 0x0F of the EEPROM. This word contains bits * that determine the hardware's default PAUSE (flow control) mode, * a bit that determines whether the HW defaults to enabling or * disabling auto-negotiation, and the direction of the @@ -687,13 +705,15 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) { s32 ret_val; - /* In the case of the phy reset being blocked, we already have a link. + /* + * In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) return 0; - /* If requested flow control is set to default, set flow control + /* + * If requested flow control is set to default, set flow control * based on the EEPROM flow control settings. */ if (hw->fc.requested_mode == e1000_fc_default) { @@ -702,7 +722,8 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) return ret_val; } - /* Save off the requested flow control mode for use later. Depending + /* + * Save off the requested flow control mode for use later. 
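The fiber and serdes link-check hunks above describe the force-link decision in prose. Here is a boiled-down sketch of that decision as three booleans; the "give auto-negotiation time to complete" timer mentioned in the comment is omitted, and the mapping of the booleans onto STATUS/RXCW/CTRL bits is an assumption of the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Inputs: link up (STATUS.LU), signal present on the optics, partner
 * sending /C/ ordered sets (RXCW.C), and link currently forced (CTRL.SLU). */
enum fiber_action {
        FIBER_NOTHING,
        FIBER_FORCE_LINK_UP,    /* partner can't autoneg: force link up, then
                                 * configure flow control by hand */
        FIBER_REENABLE_AUTONEG, /* partner started sending /C/ sets: drop the
                                 * forced link and let autoneg run again */
};

static enum fiber_action fiber_link_decision(bool link_up, bool signal,
                                             bool rx_config_sets, bool forcing)
{
        if (!link_up && signal && !rx_config_sets)
                return FIBER_FORCE_LINK_UP;
        if (forcing && rx_config_sets)
                return FIBER_REENABLE_AUTONEG;
        return FIBER_NOTHING;
}

int main(void)
{
        printf("%d\n", fiber_link_decision(false, true, false, false)); /* 1 */
        printf("%d\n", fiber_link_decision(false, true, true, true));   /* 2 */
        return 0;
}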
Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; @@ -714,7 +735,8 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Initialize the flow control address, type, and PAUSE timer + /* + * Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. @@ -741,7 +763,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) struct e1000_mac_info *mac = &hw->mac; u32 txcw; - /* Check for a software override of the flow control settings, and + /* + * Check for a software override of the flow control settings, and * setup the device accordingly. If auto-negotiation is enabled, then * software will have to set the "PAUSE" bits to the correct value in * the Transmit Config Word Register (TXCW) and re-start auto- @@ -763,7 +786,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); break; case e1000_fc_rx_pause: - /* Rx Flow control is enabled and Tx Flow control is disabled + /* + * Rx Flow control is enabled and Tx Flow control is disabled * by a software over-ride. Since there really isn't a way to * advertise that we are capable of Rx Pause ONLY, we will * advertise that we support both symmetric and asymmetric Rx @@ -773,13 +797,15 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; case e1000_fc_tx_pause: - /* Tx Flow control is enabled, and Rx Flow control is disabled, + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, * by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); break; case e1000_fc_full: - /* Flow control (both Rx and Tx) is enabled by a software + /* + * Flow control (both Rx and Tx) is enabled by a software * over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); @@ -809,7 +835,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) u32 i, status; s32 ret_val; - /* If we have a signal (the cable is plugged in, or assumed true for + /* + * If we have a signal (the cable is plugged in, or assumed true for * serdes media) then poll for a "Link-Up" indication in the Device * Status Register. Time-out if a link isn't seen in 500 milliseconds * seconds (Auto-negotiation should complete in less than 500 @@ -824,7 +851,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) if (i == FIBER_LINK_UP_LIMIT) { e_dbg("Never got a valid link from auto-neg!!!\n"); mac->autoneg_failed = true; - /* AutoNeg failed to achieve a link, so we'll call + /* + * AutoNeg failed to achieve a link, so we'll call * mac->check_for_link. This routine will force the * link up if we detect a signal. This will allow us to * communicate with non-autonegotiating link partners. @@ -866,7 +894,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Since auto-negotiation is enabled, take the link out of reset (the + /* + * Since auto-negotiation is enabled, take the link out of reset (the * link will be in reset, because we previously reset the chip). This * will restart auto-negotiation. 
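The e1000_commit_fc_settings_generic hunk above shows which TXCW bits are advertised for each requested flow control mode, but not the bit values themselves. A sketch of that mapping with placeholder bit positions; only the combinations mirror the hunk, the numeric values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TXCW_FD         (1u << 5)       /* full duplex (placeholder bit) */
#define TXCW_PAUSE      (1u << 7)       /* symmetric pause (placeholder bit) */
#define TXCW_ASM_DIR    (1u << 8)       /* asymmetric pause dir (placeholder bit) */
#define TXCW_PAUSE_MASK (TXCW_PAUSE | TXCW_ASM_DIR)
#define TXCW_ANE        (1u << 31)      /* auto-negotiation enable (placeholder) */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint32_t fc_mode_to_txcw(enum fc_mode mode)
{
        switch (mode) {
        case FC_RX_PAUSE:
                /* no way to advertise "Rx only", so advertise both
                 * symmetric and asymmetric pause */
                return TXCW_ANE | TXCW_FD | TXCW_PAUSE_MASK;
        case FC_TX_PAUSE:
                return TXCW_ANE | TXCW_FD | TXCW_ASM_DIR;
        case FC_FULL:
                return TXCW_ANE | TXCW_FD | TXCW_PAUSE_MASK;
        case FC_NONE:
        default:
                return TXCW_ANE | TXCW_FD;
        }
}

int main(void)
{
        printf("rx_pause txcw = 0x%08x\n",
               (unsigned)fc_mode_to_txcw(FC_RX_PAUSE));
        return 0;
}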
If auto-negotiation is successful * then the link-up status bit will be set and the flow control enable @@ -878,7 +907,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) e1e_flush(); usleep_range(1000, 2000); - /* For these adapters, the SW definable pin 1 is set when the optics + /* + * For these adapters, the SW definable pin 1 is set when the optics * detect a signal. If we have a signal, then poll for a "Link-Up" * indication. */ @@ -924,14 +954,16 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) { u32 fcrtl = 0, fcrth = 0; - /* Set the flow control receive threshold registers. Normally, + /* + * Set the flow control receive threshold registers. Normally, * these registers will be set to a default threshold that may be * adjusted later by the driver's runtime code. However, if the * ability to transmit pause frames is not enabled, then these * registers will be set to 0. */ if (hw->fc.current_mode & e1000_fc_tx_pause) { - /* We need to set up the Receive Threshold high and low water + /* + * We need to set up the Receive Threshold high and low water * marks as well as (optionally) enabling the transmission of * XON frames. */ @@ -963,7 +995,8 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) ctrl = er32(CTRL); - /* Because we didn't get link via the internal auto-negotiation + /* + * Because we didn't get link via the internal auto-negotiation * mechanism (we either forced link or we got link via PHY * auto-neg), we have to manually enable/disable transmit an * receive flow control. @@ -1024,7 +1057,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; - /* Check for the case where we have fiber media and auto-neg failed + /* + * Check for the case where we have fiber media and auto-neg failed * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ @@ -1042,13 +1076,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) return ret_val; } - /* Check for the case where we have copper media and auto-neg is + /* + * Check for the case where we have copper media and auto-neg is * enabled. In this case, we need to check and see if Auto-Neg * has completed, and if so, how the PHY and link partner has * flow control configured. */ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { - /* Read the MII Status Register and check to see if AutoNeg + /* + * Read the MII Status Register and check to see if AutoNeg * has completed. We read this twice because this reg has * some "sticky" (latched) bits. */ @@ -1064,7 +1100,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) return ret_val; } - /* The AutoNeg process has completed, so we now need to + /* + * The AutoNeg process has completed, so we now need to * read both the Auto Negotiation Advertisement * Register (Address 4) and the Auto_Negotiation Base * Page Ability Register (Address 5) to determine how @@ -1078,7 +1115,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Two bits in the Auto Negotiation Advertisement Register + /* + * Two bits in the Auto Negotiation Advertisement Register * (Address 4) and two bits in the Auto Negotiation Base * Page Ability Register (Address 5) determine flow control * for both the PHY and the link partner. 
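The e1000e_set_fc_watermarks hunk above says the receive thresholds are programmed only when transmitting pause frames is enabled, and that XON transmission is optional. A sketch of that logic follows; the bitmask flow-control encoding and the position of the XON-enable bit are assumptions of the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flow control modes as a bitmask so that "full" implies both directions;
 * this mirrors the `current_mode & e1000_fc_tx_pause` test in the hunk. */
#define FC_RX_PAUSE     0x1u
#define FC_TX_PAUSE     0x2u
#define FC_FULL         (FC_RX_PAUSE | FC_TX_PAUSE)

#define FCRTL_XON_ENABLE        (1u << 31)      /* placeholder bit */

static void fc_watermarks(uint32_t mode, uint32_t low_water,
                          uint32_t high_water, bool send_xon,
                          uint32_t *fcrtl, uint32_t *fcrth)
{
        /* if we cannot send pause frames, both thresholds stay 0 */
        *fcrtl = 0;
        *fcrth = 0;
        if (!(mode & FC_TX_PAUSE))
                return;

        *fcrtl = low_water;
        if (send_xon)
                *fcrtl |= FCRTL_XON_ENABLE;
        *fcrth = high_water;
}

int main(void)
{
        uint32_t l, h;

        fc_watermarks(FC_FULL, 0x5000, 0x5800, true, &l, &h);
        printf("FCRTL=0x%08x FCRTH=0x%08x\n", (unsigned)l, (unsigned)h);
        return 0;
}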
The following @@ -1113,7 +1151,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) */ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { - /* Now we need to check if the user selected Rx ONLY + /* + * Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise Rx * ONLY. Hence, we must now check to see if we need to @@ -1127,7 +1166,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) e_dbg("Flow Control = Rx PAUSE frames only.\n"); } } - /* For receiving PAUSE frames ONLY. + /* + * For receiving PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -1141,7 +1181,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) hw->fc.current_mode = e1000_fc_tx_pause; e_dbg("Flow Control = Tx PAUSE frames only.\n"); } - /* For transmitting PAUSE frames ONLY. + /* + * For transmitting PAUSE frames ONLY. * * LOCAL DEVICE | LINK PARTNER * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result @@ -1155,14 +1196,16 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) hw->fc.current_mode = e1000_fc_rx_pause; e_dbg("Flow Control = Rx PAUSE frames only.\n"); } else { - /* Per the IEEE spec, at this point flow control + /* + * Per the IEEE spec, at this point flow control * should be disabled. */ hw->fc.current_mode = e1000_fc_none; e_dbg("Flow Control = NONE.\n"); } - /* Now we need to do one last check... If we auto- + /* + * Now we need to do one last check... If we auto- * negotiated to HALF DUPLEX, flow control should not be * enabled per IEEE 802.3 spec. */ @@ -1175,7 +1218,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) if (duplex == HALF_DUPLEX) hw->fc.current_mode = e1000_fc_none; - /* Now we call a subroutine to actually force the MAC + /* + * Now we call a subroutine to actually force the MAC * controller to use the correct flow control settings. */ ret_val = e1000e_force_mac_fc(hw); @@ -1476,7 +1520,8 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw) ledctl_blink = E1000_LEDCTL_LED0_BLINK | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); } else { - /* set the blink bit for each LED that's "on" (0x0E) + /* + * set the blink bit for each LED that's "on" (0x0E) * in ledctl_mode2 */ ledctl_blink = hw->mac.ledctl_mode2; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/manage.c b/trunk/drivers/net/ethernet/intel/e1000e/manage.c index 6dc47beb3adc..bacc950fc684 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/manage.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/manage.c @@ -143,7 +143,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) return hw->mac.tx_pkt_filtering; } - /* If we can't read from the host interface for whatever + /* + * If we can't read from the host interface for whatever * reason, disable filtering. */ ret_val = e1000_mng_enable_host_if(hw); @@ -162,7 +163,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) hdr->checksum = 0; csum = e1000_calculate_checksum((u8 *)hdr, E1000_MNG_DHCP_COOKIE_LENGTH); - /* If either the checksums or signature don't match, then + /* + * If either the checksums or signature don't match, then * the cookie area isn't considered valid, in which case we * take the safe route of assuming Tx filtering is enabled. 
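The truth-table comments in e1000e_config_fc_after_link_up are split across several hunks here, which makes the resolution logic hard to follow. Below is a standalone sketch of the standard 802.3 pause resolution those branches implement, including the "user asked for Rx only but we had to advertise full" special case and the final half-duplex override, both of which are spelled out in the comments.

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve the negotiated flow control mode from the PAUSE/ASM_DIR bits of
 * the local advertisement and the link partner ability registers.
 * `requested` is what the user asked for before autoneg; `half_duplex`
 * forces the result to none, per the final check in the hunk. */
static enum fc_mode resolve_fc(bool adv_pause, bool adv_asm,
                               bool lp_pause, bool lp_asm,
                               enum fc_mode requested, bool half_duplex)
{
        enum fc_mode mode;

        if (adv_pause && lp_pause)
                /* both sides do symmetric pause; honour an "Rx only"
                 * request, which had to be advertised as full */
                mode = (requested == FC_RX_PAUSE) ? FC_RX_PAUSE : FC_FULL;
        else if (!adv_pause && adv_asm && lp_pause && lp_asm)
                mode = FC_TX_PAUSE;
        else if (adv_pause && adv_asm && !lp_pause && lp_asm)
                mode = FC_RX_PAUSE;
        else
                mode = FC_NONE;

        /* per the IEEE spec, no pause frames in half duplex */
        if (half_duplex)
                mode = FC_NONE;
        return mode;
}

int main(void)
{
        printf("%d\n", resolve_fc(true, true, true, true, FC_FULL, false));  /* 3 */
        printf("%d\n", resolve_fc(false, true, true, true, FC_FULL, false)); /* 2 */
        printf("%d\n", resolve_fc(true, true, false, true, FC_FULL, false)); /* 1 */
        return 0;
}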
*/ @@ -250,7 +252,8 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, /* Calculate length in DWORDs */ length >>= 2; - /* The device driver writes the relevant command block into the + /* + * The device driver writes the relevant command block into the * ram area. */ for (i = 0; i < length; i++) { diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c index 6d06ed4e34b1..dadb13be479a 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c @@ -146,11 +146,9 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { {0, NULL} }; -/** +/* * e1000_regdump - register printout routine - * @hw: pointer to the HW structure - * @reginfo: pointer to the register info table - **/ + */ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) { int n = 0; @@ -198,10 +196,9 @@ static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, } } -/** +/* * e1000e_dump - Print registers, Tx-ring and Rx-ring - * @adapter: board private structure - **/ + */ static void e1000e_dump(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -626,7 +623,8 @@ static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { - /* Force memory writes to complete before letting h/w + /* + * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). @@ -694,7 +692,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, goto no_buffers; } } - /* Refresh the desc even if buffer_addrs + /* + * Refresh the desc even if buffer_addrs * didn't change because each write-back * erases this info. */ @@ -727,7 +726,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { - /* Force memory writes to complete before letting h/w + /* + * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). @@ -817,8 +817,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). - */ + * such as IA-64). */ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_rdt_wa(rx_ring, i); @@ -892,7 +891,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, length = le16_to_cpu(rx_desc->wb.upper.length); - /* !EOP means multiple descriptors were used to store a single + /* + * !EOP means multiple descriptors were used to store a single * packet, if that's the case we need to toss it. 
In fact, we * need to toss every packet with the EOP bit clear and the * next frame that _does_ have the EOP bit set, as it is by @@ -933,7 +933,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, total_rx_bytes += length; total_rx_packets++; - /* code added for copybreak, this should improve + /* + * code added for copybreak, this should improve * performance for small packets with large amounts * of reassembly being done in the stack */ @@ -1031,13 +1032,15 @@ static void e1000_print_hw_hang(struct work_struct *work) if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { - /* May be block on write-back, flush and detect again + /* + * May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); /* execute the writes immediately */ e1e_flush(); - /* Due to rare timing issues, write to TIDV again to ensure + /* + * Due to rare timing issues, write to TIDV again to ensure * the write is successful */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); @@ -1166,7 +1169,8 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) } if (adapter->detect_tx_hung) { - /* Detect a transmit hang in hardware, this serializes the + /* + * Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ adapter->detect_tx_hung = false; @@ -1266,12 +1270,14 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, length); { - /* this looks ugly, but it seems compiler issues make + /* + * this looks ugly, but it seems compiler issues make * it more efficient than reusing j */ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); - /* page alloc/put takes too long and effects small + /* + * page alloc/put takes too long and effects small * packet throughput, so unsplit small packets and * save the alloc/put only valid in softirq (napi) * context to call kmap_* @@ -1282,7 +1288,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, ps_page = &buffer_info->ps_pages[0]; - /* there is no documentation about how to call + /* + * there is no documentation about how to call * kmap_atomic, so we can't hold the mapping * very long */ @@ -1479,16 +1486,14 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb_shinfo(rxtop)->nr_frags, buffer_info->page, 0, length); /* re-use the current skb, we only consumed the - * page - */ + * page */ buffer_info->skb = skb; skb = rxtop; rxtop = NULL; e1000_consume_page(buffer_info, skb, length); } else { /* no chain, got EOP, this buf is the packet - * copybreak to save the put_page/alloc_page - */ + * copybreak to save the put_page/alloc_page */ if (length <= copybreak && skb_tailroom(skb) >= length) { u8 *vaddr; @@ -1497,8 +1502,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, length); kunmap_atomic(vaddr); /* re-use the page, so don't erase - * buffer_info->page - */ + * buffer_info->page */ skb_put(skb, length); } else { skb_fill_page_desc(skb, 0, @@ -1652,17 +1656,22 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) struct e1000_hw *hw = &adapter->hw; u32 icr = er32(ICR); - /* read ICR disables interrupts using IAM */ + /* + * read ICR disables interrupts using IAM + */ + if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; - /* ICH8 workaround-- Call gig speed drop workaround on cable + /* + * ICH8 workaround-- Call gig speed drop 
workaround on cable * disconnect (LSC) before accessing any PHY registers */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) schedule_work(&adapter->downshift_task); - /* 80003ES2LAN workaround-- For packet buffer work-around on + /* + * 80003ES2LAN workaround-- For packet buffer work-around on * link down event; disable receives here in the ISR and reset * adapter in watchdog */ @@ -1704,27 +1713,31 @@ static irqreturn_t e1000_intr(int irq, void *data) if (!icr || test_bit(__E1000_DOWN, &adapter->state)) return IRQ_NONE; /* Not our interrupt */ - /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + /* + * IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt */ if (!(icr & E1000_ICR_INT_ASSERTED)) return IRQ_NONE; - /* Interrupt Auto-Mask...upon reading ICR, + /* + * Interrupt Auto-Mask...upon reading ICR, * interrupts are masked. No need for the * IMC write */ if (icr & E1000_ICR_LSC) { hw->mac.get_link_status = true; - /* ICH8 workaround-- Call gig speed drop workaround on cable + /* + * ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers */ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && (!(er32(STATUS) & E1000_STATUS_LU))) schedule_work(&adapter->downshift_task); - /* 80003ES2LAN workaround-- + /* + * 80003ES2LAN workaround-- * For packet buffer work-around on link down event; * disable receives here in the ISR and * reset adapter in watchdog @@ -2456,7 +2469,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter) set_itr_now: if (new_itr != adapter->itr) { - /* this attempts to bias the interrupt rate towards Bulk + /* + * this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ @@ -2726,7 +2740,8 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) manc = er32(MANC); - /* enable receiving management packets to the host. this will probably + /* + * enable receiving management packets to the host. this will probably * generate destination unreachable messages from the host OS, but * the packets will be handled on SMBUS */ @@ -2739,7 +2754,8 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) break; case e1000_82574: case e1000_82583: - /* Check if IPMI pass-through decision filter already exists; + /* + * Check if IPMI pass-through decision filter already exists; * if so, enable it. 
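Several of the Rx-path hunks above (e1000_clean_rx_irq and its packet-split variant) mention "copybreak": copying small frames out of the large receive buffer so the buffer can be reused. A standalone sketch of that idea, using plain malloc/memcpy in place of skb handling; the 256-byte threshold and all names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Frames at or below the threshold are memcpy'd into a small right-sized
 * buffer so the large DMA buffer can be re-posted to the ring immediately;
 * larger frames hand over the DMA buffer itself. */
#define COPYBREAK       256

struct rx_frame {
        uint8_t *data;
        size_t len;
        int owns_dma_buf;       /* nonzero if we kept the original buffer */
};

static struct rx_frame receive_frame(uint8_t *dma_buf, size_t len)
{
        struct rx_frame f = { NULL, len, 0 };

        if (len <= COPYBREAK) {
                f.data = malloc(len);
                if (f.data)
                        memcpy(f.data, dma_buf, len);
                /* dma_buf can be reused for the next descriptor right away */
        } else {
                f.data = dma_buf;
                f.owns_dma_buf = 1;
                /* the ring slot now needs a fresh buffer allocated */
        }
        return f;
}

int main(void)
{
        uint8_t buf[2048] = { 0xde, 0xad };
        struct rx_frame small = receive_frame(buf, 64);
        struct rx_frame big = receive_frame(buf, 1500);

        printf("small copied=%d, big copied=%d\n",
               !small.owns_dma_buf, !big.owns_dma_buf);
        if (!small.owns_dma_buf)
                free(small.data);
        return 0;
}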
*/ for (i = 0, j = 0; i < 8; i++) { @@ -2811,7 +2827,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) u32 txdctl = er32(TXDCTL(0)); txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | E1000_TXDCTL_WTHRESH); - /* set up some performance related parameters to encourage the + /* + * set up some performance related parameters to encourage the * hardware to use the bus more efficiently in bursts, depends * on the tx_int_delay to be enabled, * wthresh = 1 ==> burst write is disabled to avoid Tx stalls @@ -2828,7 +2845,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); - /* set the speed mode bit, we'll clear it if we're not at + /* + * set the speed mode bit, we'll clear it if we're not at * gigabit link later */ #define SPEED_MODE_BIT (1 << 21) @@ -2949,7 +2967,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rfctl |= E1000_RFCTL_EXTEN; ew32(RFCTL, rfctl); - /* 82571 and greater support packet-split where the protocol + /* + * 82571 and greater support packet-split where the protocol * header is placed in skb->data and the packet data is * placed in pages hanging off of skb_shinfo(skb)->nr_frags. * In the case of a non-split, skb->data is linearly filled, @@ -2997,8 +3016,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* This is useful for sniffing bad packets. */ if (adapter->netdev->features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic - * in e1000e_set_rx_mode - */ + * in e1000e_set_rx_mode */ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ @@ -3053,7 +3071,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) usleep_range(10000, 20000); if (adapter->flags2 & FLAG2_DMA_BURST) { - /* set the writeback threshold (only takes effect if the RDTR + /* + * set the writeback threshold (only takes effect if the RDTR * is set). set GRAN=1 and write back up to 0x4 worth, and * enable prefetching of 0x20 Rx descriptors * granularity = 01 @@ -3064,7 +3083,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); - /* override the delay timers for enabling bursting, only if + /* + * override the delay timers for enabling bursting, only if * the value was not set by the user via module options */ if (adapter->rx_int_delay == DEFAULT_RDTR) @@ -3088,7 +3108,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(CTRL_EXT, ctrl_ext); e1e_flush(); - /* Setup the HW Rx Head and Tail Descriptor Pointers and + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; @@ -3109,7 +3130,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RXCSUM, rxcsum); if (adapter->hw.mac.type == e1000_pch2lan) { - /* With jumbo frames, excessive C-state transition + /* + * With jumbo frames, excessive C-state transition * latencies result in dropped transactions. 
*/ if (adapter->netdev->mtu > ETH_DATA_LEN) { @@ -3194,7 +3216,8 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) if (!netdev_uc_empty(netdev) && rar_entries) { struct netdev_hw_addr *ha; - /* write the addresses in reverse order to avoid write + /* + * write the addresses in reverse order to avoid write * combining */ netdev_for_each_uc_addr(ha, netdev) { @@ -3246,7 +3269,8 @@ static void e1000e_set_rx_mode(struct net_device *netdev) if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; } else { - /* Write addresses to the MTA, if the attempt fails + /* + * Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ @@ -3255,7 +3279,8 @@ static void e1000e_set_rx_mode(struct net_device *netdev) rctl |= E1000_RCTL_MPE; } e1000e_vlan_filter_enable(adapter); - /* Write addresses to available RAR registers, if there is not + /* + * Write addresses to available RAR registers, if there is not * sufficient space to store all the addresses then enable * unicast promiscuous mode */ @@ -3290,7 +3315,8 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) for (i = 0; i < 32; i++) ew32(RETA(i), 0); - /* Disable raw packet checksumming so that RSS hash is placed in + /* + * Disable raw packet checksumming so that RSS hash is placed in * descriptor on writeback. */ rxcsum = er32(RXCSUM); @@ -3382,7 +3408,8 @@ void e1000e_reset(struct e1000_adapter *adapter) ew32(PBA, pba); if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { - /* To maintain wire speed transmits, the Tx FIFO should be + /* + * To maintain wire speed transmits, the Tx FIFO should be * large enough to accommodate two full transmit packets, * rounded up to the next 1KB and expressed in KB. Likewise, * the Rx FIFO should be large enough to accommodate at least @@ -3394,7 +3421,8 @@ void e1000e_reset(struct e1000_adapter *adapter) tx_space = pba >> 16; /* lower 16 bits has Rx packet buffer allocation size in KB */ pba &= 0xffff; - /* the Tx fifo also stores 16 bytes of information about the Tx + /* + * the Tx fifo also stores 16 bytes of information about the Tx * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + @@ -3407,7 +3435,8 @@ void e1000e_reset(struct e1000_adapter *adapter) min_rx_space = ALIGN(min_rx_space, 1024); min_rx_space >>= 10; - /* If current Tx allocation is less than the min Tx FIFO size, + /* + * If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO * allocation, take space away from current Rx allocation */ @@ -3415,7 +3444,8 @@ void e1000e_reset(struct e1000_adapter *adapter) ((min_tx_space - tx_space) < pba)) { pba -= min_tx_space - tx_space; - /* if short on Rx space, Rx wins and must trump Tx + /* + * if short on Rx space, Rx wins and must trump Tx * adjustment */ if (pba < min_rx_space) @@ -3425,7 +3455,8 @@ void e1000e_reset(struct e1000_adapter *adapter) ew32(PBA, pba); } - /* flow control settings + /* + * flow control settings * * The high water mark must be low enough to fit one full frame * (or the size used for early receive) above it in the Rx FIFO. @@ -3459,7 +3490,8 @@ void e1000e_reset(struct e1000_adapter *adapter) fc->low_water = fc->high_water - 8; break; case e1000_pchlan: - /* Workaround PCH LOM adapter hangs with certain network + /* + * Workaround PCH LOM adapter hangs with certain network * loads. 
If hangs persist, try disabling Tx flow control. */ if (adapter->netdev->mtu > ETH_DATA_LEN) { @@ -3484,7 +3516,8 @@ void e1000e_reset(struct e1000_adapter *adapter) break; } - /* Alignment of Tx data is on an arbitrary byte boundary with the + /* + * Alignment of Tx data is on an arbitrary byte boundary with the * maximum size per Tx descriptor limited only to the transmit * allocation of the packet buffer minus 96 bytes with an upper * limit of 24KB due to receive synchronization limitations. @@ -3492,7 +3525,8 @@ void e1000e_reset(struct e1000_adapter *adapter) adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, 24 << 10); - /* Disable Adaptive Interrupt Moderation if 2 full packets cannot + /* + * Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. */ if (adapter->itr_setting & 0x3) { @@ -3515,7 +3549,8 @@ void e1000e_reset(struct e1000_adapter *adapter) /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); - /* For parts with AMT enabled, let the firmware know + /* + * For parts with AMT enabled, let the firmware know * that the network interface is in control */ if (adapter->flags & FLAG_HAS_AMT) @@ -3544,7 +3579,8 @@ void e1000e_reset(struct e1000_adapter *adapter) if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && !(adapter->flags & FLAG_SMART_POWER_DOWN)) { u16 phy_data = 0; - /* speed up time to link by disabling smart power down, ignore + /* + * speed up time to link by disabling smart power down, ignore * the return value of this function because there is nothing * different we would do if it failed */ @@ -3592,7 +3628,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter) /* execute the writes immediately */ e1e_flush(); - /* due to rare timing issues, write to TIDV/RDTR again to ensure the + /* + * due to rare timing issues, write to TIDV/RDTR again to ensure the * write is successful */ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); @@ -3610,7 +3647,8 @@ void e1000e_down(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 tctl, rctl; - /* signal that we're down so the interrupt handler does not + /* + * signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__E1000_DOWN, &adapter->state); @@ -3653,7 +3691,8 @@ void e1000e_down(struct e1000_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); - /* TODO: for power management, we could drop the link and + /* + * TODO: for power management, we could drop the link and * pci_disable_device here. */ } @@ -3716,7 +3755,8 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) e_dbg("icr is %08X\n", icr); if (icr & E1000_ICR_RXSEQ) { adapter->flags &= ~FLAG_MSI_TEST_FAILED; - /* Force memory writes to complete before acknowledging the + /* + * Force memory writes to complete before acknowledging the * interrupt is handled. 
*/ wmb(); @@ -3746,8 +3786,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) e1000e_reset_interrupt_capability(adapter); /* Assume that the test fails, if it succeeds then the test - * MSI irq handler will unset this flag - */ + * MSI irq handler will unset this flag */ adapter->flags |= FLAG_MSI_TEST_FAILED; err = pci_enable_msi(adapter->pdev); @@ -3761,7 +3800,8 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) goto msi_test_failed; } - /* Force memory writes to complete before enabling and firing an + /* + * Force memory writes to complete before enabling and firing an * interrupt. */ wmb(); @@ -3861,7 +3901,8 @@ static int e1000_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* If AMT is enabled, let the firmware know that the network + /* + * If AMT is enabled, let the firmware know that the network * interface is now open and reset the part to a known state. */ if (adapter->flags & FLAG_HAS_AMT) { @@ -3882,7 +3923,8 @@ static int e1000_open(struct net_device *netdev) PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - /* before we allocate an interrupt, we must be ready to handle it. + /* + * before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. @@ -3893,7 +3935,8 @@ static int e1000_open(struct net_device *netdev) if (err) goto err_req_irq; - /* Work around PCIe errata with MSI interrupts causing some chipsets to + /* + * Work around PCIe errata with MSI interrupts causing some chipsets to * ignore e1000e MSI messages, which means we need to test our MSI * interrupt now */ @@ -3974,14 +4017,16 @@ static int e1000_close(struct net_device *netdev) e1000e_free_tx_resources(adapter->tx_ring); e1000e_free_rx_resources(adapter->rx_ring); - /* kill manageability vlan ID if supported, but not if a vlan with + /* + * kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it) */ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); - /* If AMT is enabled, let the firmware know that the network + /* + * If AMT is enabled, let the firmware know that the network * interface is now closed */ if ((adapter->flags & FLAG_HAS_AMT) && @@ -4020,7 +4065,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p) /* activate the work around */ e1000e_set_laa_state_82571(&adapter->hw, 1); - /* Hold a copy of the LAA in RAR[14] This is done so that + /* + * Hold a copy of the LAA in RAR[14] This is done so that * between the time RAR[0] gets clobbered and the time it * gets fixed (in e1000_watchdog), the actual LAA is in one * of the RARs and no incoming packets directed to this port @@ -4053,13 +4099,10 @@ static void e1000e_update_phy_task(struct work_struct *work) e1000_get_phy_info(&adapter->hw); } -/** - * e1000_update_phy_info - timre call-back to update PHY info - * @data: pointer to adapter cast into an unsigned long - * +/* * Need to wait a few seconds after link up to get diagnostic information from * the phy - **/ + */ static void e1000_update_phy_info(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; @@ -4086,7 +4129,8 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter) if (ret_val) return; - /* A page set is expensive so check if already on desired page. 
+ /* + * A page set is expensive so check if already on desired page. * If not, set to the page with the PHY status registers. */ hw->phy.addr = 1; @@ -4157,7 +4201,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - /* Prevent stats update while adapter is being reset, or if the pci + /* + * Prevent stats update while adapter is being reset, or if the pci * connection is down. */ if (adapter->link_speed == 0) @@ -4225,7 +4270,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) /* Rx Errors */ - /* RLEC on some newer hardware can be incorrect so build + /* + * RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ netdev->stats.rx_errors = adapter->stats.rxerrc + @@ -4277,7 +4323,8 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) if (ret_val) e_warn("Error reading PHY register\n"); } else { - /* Do not read PHY registers if link is not up + /* + * Do not read PHY registers if link is not up * Set values to typical power-on defaults */ phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); @@ -4315,7 +4362,8 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) bool link_active = false; s32 ret_val = 0; - /* get_link_status is set on LSC (link status) interrupt or + /* + * get_link_status is set on LSC (link status) interrupt or * Rx sequence error interrupt. get_link_status will stay * false until the check_for_link establishes link * for copper adapters ONLY @@ -4367,7 +4415,8 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - /* With 82574 controllers, PHY needs to be checked periodically + /* + * With 82574 controllers, PHY needs to be checked periodically * for hung state and reset, if two calls return true */ if (e1000_check_phy_82574(hw)) @@ -4435,7 +4484,8 @@ static void e1000_watchdog_task(struct work_struct *work) &adapter->link_speed, &adapter->link_duplex); e1000_print_link_info(adapter); - /* On supported PHYs, check for duplex mismatch only + /* + * On supported PHYs, check for duplex mismatch only * if link has autonegotiated at 10/100 half */ if ((hw->phy.type == e1000_phy_igp_3 || @@ -4465,7 +4515,8 @@ static void e1000_watchdog_task(struct work_struct *work) break; } - /* workaround: re-program speed mode bit after + /* + * workaround: re-program speed mode bit after * link-up event */ if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && @@ -4476,7 +4527,8 @@ static void e1000_watchdog_task(struct work_struct *work) ew32(TARC(0), tarc0); } - /* disable TSO for pcie and 10/100 speeds, to avoid + /* + * disable TSO for pcie and 10/100 speeds, to avoid * some hardware issues */ if (!(adapter->flags & FLAG_TSO_FORCE)) { @@ -4497,14 +4549,16 @@ static void e1000_watchdog_task(struct work_struct *work) } } - /* enable transmits in the hardware, need to do this + /* + * enable transmits in the hardware, need to do this * after setting TARC(0) */ tctl = er32(TCTL); tctl |= E1000_TCTL_EN; ew32(TCTL, tctl); - /* Perform any post-link-up configuration before + /* + * Perform any post-link-up configuration before * reporting link up. 
*/ if (phy->ops.cfg_on_link_up) @@ -4555,7 +4609,8 @@ static void e1000_watchdog_task(struct work_struct *work) if (!netif_carrier_ok(netdev) && (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { - /* We've lost link, so the controller stops DMA, + /* + * We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). @@ -4567,7 +4622,8 @@ static void e1000_watchdog_task(struct work_struct *work) /* Simple mode for Interrupt Throttle Rate (ITR) */ if (adapter->itr_setting == 4) { - /* Symmetric Tx/Rx gets a reduced ITR=2000; + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; * Total asymmetrical Tx or Rx gets ITR=8000; * everyone else is between 2000-8000. */ @@ -4592,7 +4648,8 @@ static void e1000_watchdog_task(struct work_struct *work) /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = true; - /* With 82571 controllers, LAA may be overwritten due to controller + /* + * With 82571 controllers, LAA may be overwritten due to controller * reset from the other port. Set the appropriate LAA in RAR[0] */ if (e1000e_get_laa_state_82571(hw)) @@ -4891,7 +4948,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); - /* Force memory writes to complete before letting h/w + /* + * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). @@ -4905,7 +4963,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) else writel(i, tx_ring->tail); - /* we need this if more than one processor can write to our tail + /* + * we need this if more than one processor can write to our tail * at a time, it synchronizes IO on IA64/Altix systems */ mmiowb(); @@ -4955,13 +5014,15 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) struct e1000_adapter *adapter = tx_ring->adapter; netif_stop_queue(adapter->netdev); - /* Herbert's original patch had: + /* + * Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ smp_mb(); - /* We need to check again in a case another CPU has just + /* + * We need to check again in a case another CPU has just * made room available. 
*/ if (e1000_desc_unused(tx_ring) < size) @@ -5006,7 +5067,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } - /* The minimum packet size with TCTL.PSP set is 17 bytes so + /* + * The minimum packet size with TCTL.PSP set is 17 bytes so * pad skb in order to meet this minimum size requirement */ if (unlikely(skb->len < 17)) { @@ -5020,12 +5082,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (mss) { u8 hdr_len; - /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + /* + * TSO Workaround for 82571/2/3 Controllers -- if skb->data * points to just header, pull a few bytes of payload from * frags into skb->data */ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - /* we do this workaround for ES2LAN, but it is un-necessary, + /* + * we do this workaround for ES2LAN, but it is un-necessary, * avoiding it could save a lot of cycles */ if (skb->data_len && (hdr_len == len)) { @@ -5056,7 +5120,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); - /* need: count + 2 desc gap to keep tail from touching + /* + * need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ if (e1000_maybe_stop_tx(tx_ring, count + 2)) @@ -5080,7 +5145,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, else if (e1000_tx_csum(tx_ring, skb)) tx_flags |= E1000_TX_FLAGS_CSUM; - /* Old method was to assume IPv4 packet by default if TSO was enabled. + /* + * Old method was to assume IPv4 packet by default if TSO was enabled. * 82571 hardware supports TSO capabilities for IPv6 as well... * no longer assume, we must. */ @@ -5167,7 +5233,8 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, /* Rx Errors */ - /* RLEC on some newer hardware can be incorrect so build + /* + * RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ stats->rx_errors = adapter->stats.rxerrc + @@ -5236,7 +5303,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) e1000e_down(adapter); - /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab @@ -5498,7 +5566,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - /* Release control of h/w to f/w. If f/w is AMT enabled, this + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000e_release_hw_control(adapter); @@ -5525,7 +5594,8 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - /* The pci-e switch on some quad port adapters will report a + /* + * The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the * downstream port of the pci-e switch. 
@@ -5554,7 +5624,8 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) #else static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { - /* Both device and parent should have the same ASPM setting. + /* + * Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. */ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); @@ -5648,7 +5719,8 @@ static int __e1000_resume(struct pci_dev *pdev) netif_device_attach(netdev); - /* If the controller has AMT, do not set DRV_LOAD until the interface + /* + * If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ @@ -5776,10 +5848,7 @@ static irqreturn_t e1000_intr_msix(int irq, void *data) return IRQ_HANDLED; } -/** - * e1000_netpoll - * @netdev: network interface device structure - * +/* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. @@ -5904,7 +5973,8 @@ static void e1000_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); - /* If the controller has AMT, do not set DRV_LOAD until the interface + /* + * If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ @@ -6203,12 +6273,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (e1000e_enable_mng_pass_thru(&adapter->hw)) adapter->flags |= FLAG_MNG_PT_ENABLED; - /* before reading the NVM, reset the controller to + /* + * before reading the NVM, reset the controller to * put the device in a known good starting state */ adapter->hw.mac.ops.reset_hw(&adapter->hw); - /* systems with ASPM and others may see the checksum fail on the first + /* + * systems with ASPM and others may see the checksum fail on the first * attempt. Let's give it a few tries */ for (i = 0;; i++) { @@ -6263,7 +6335,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->rx_ring->count = E1000_DEFAULT_RXD; adapter->tx_ring->count = E1000_DEFAULT_TXD; - /* Initial Wake on LAN setting - If APM wake is enabled in + /* + * Initial Wake on LAN setting - If APM wake is enabled in * the EEPROM, enable the ACPI Magic Packet filter */ if (adapter->flags & FLAG_APME_IN_WUC) { @@ -6287,7 +6360,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (eeprom_data & eeprom_apme_mask) adapter->eeprom_wol |= E1000_WUFC_MAG; - /* now that we have the eeprom settings, apply the special cases + /* + * now that we have the eeprom settings, apply the special cases * where the eeprom may be wrong or the board simply won't support * wake on lan on a particular port */ @@ -6304,7 +6378,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* reset the hardware with the new settings */ e1000e_reset(adapter); - /* If the controller has AMT, do not set DRV_LOAD until the interface + /* + * If the controller has AMT, do not set DRV_LOAD until the interface * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. 
*/ @@ -6367,7 +6442,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) struct e1000_adapter *adapter = netdev_priv(netdev); bool down = test_bit(__E1000_DOWN, &adapter->state); - /* The timers may be rescheduled, so explicitly disable them + /* + * The timers may be rescheduled, so explicitly disable them * from being rescheduled. */ if (!down) @@ -6392,7 +6468,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); - /* Release control of h/w to f/w. If f/w is AMT enabled, this + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ e1000e_release_hw_control(adapter); diff --git a/trunk/drivers/net/ethernet/intel/e1000e/nvm.c b/trunk/drivers/net/ethernet/intel/e1000e/nvm.c index b6468804cb2e..a969f1af1b4e 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/nvm.c @@ -279,7 +279,8 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) e1e_flush(); udelay(1); - /* Read "Status Register" repeatedly until the LSB is cleared. + /* + * Read "Status Register" repeatedly until the LSB is cleared. * The EEPROM will signal that the command has been completed * by clearing bit 0 of the internal status register. If it's * not cleared within 'timeout', then error out. @@ -320,7 +321,8 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) u32 i, eerd = 0; s32 ret_val = 0; - /* A check for invalid values: offset too large, too many words, + /* + * A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -362,7 +364,8 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) s32 ret_val; u16 widx = 0; - /* A check for invalid values: offset too large, too many words, + /* + * A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || @@ -390,7 +393,8 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) e1000_standby_nvm(hw); - /* Some SPI eeproms use the 8th address bit embedded in the + /* + * Some SPI eeproms use the 8th address bit embedded in the * opcode */ if ((nvm->address_bits == 8) && (offset >= 128)) @@ -457,7 +461,8 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, return ret_val; } - /* if nvm_data is not ptr guard the PBA must be in legacy format which + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ diff --git a/trunk/drivers/net/ethernet/intel/e1000e/param.c b/trunk/drivers/net/ethernet/intel/e1000e/param.c index 1fbb31554e4d..dfbfa7fd98c3 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/param.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/param.c @@ -32,9 +32,11 @@ #include "e1000.h" -/* This is the only thing that needs to be changed to adjust the +/* + * This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. 
*/ + #define E1000_MAX_NIC 32 #define OPTION_UNSET -1 @@ -47,10 +49,12 @@ module_param(copybreak, uint, 0644); MODULE_PARM_DESC(copybreak, "Maximum size of packet that is copied to a new buffer on receive"); -/* All parameters are treated the same, as an integer array of values. +/* + * All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ + #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } #define E1000_PARAM(X, desc) \ static int __devinitdata X[E1000_MAX_NIC+1] \ @@ -59,7 +63,8 @@ MODULE_PARM_DESC(copybreak, module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); -/* Transmit Interrupt Delay in units of 1.024 microseconds +/* + * Transmit Interrupt Delay in units of 1.024 microseconds * Tx interrupt delay needs to typically be set to something non-zero * * Valid Range: 0-65535 @@ -69,7 +74,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); #define MAX_TXDELAY 0xFFFF #define MIN_TXDELAY 0 -/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds +/* + * Transmit Absolute Interrupt Delay in units of 1.024 microseconds * * Valid Range: 0-65535 */ @@ -78,7 +84,8 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); #define MAX_TXABSDELAY 0xFFFF #define MIN_TXABSDELAY 0 -/* Receive Interrupt Delay in units of 1.024 microseconds +/* + * Receive Interrupt Delay in units of 1.024 microseconds * hardware will likely hang if you set this to anything but zero. * * Valid Range: 0-65535 @@ -87,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); #define MAX_RXDELAY 0xFFFF #define MIN_RXDELAY 0 -/* Receive Absolute Interrupt Delay in units of 1.024 microseconds +/* + * Receive Absolute Interrupt Delay in units of 1.024 microseconds * * Valid Range: 0-65535 */ @@ -95,7 +103,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); #define MAX_RXABSDELAY 0xFFFF #define MIN_RXABSDELAY 0 -/* Interrupt Throttle Rate (interrupts/sec) +/* + * Interrupt Throttle Rate (interrupts/sec) * * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative */ @@ -104,7 +113,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define MAX_ITR 100000 #define MIN_ITR 100 -/* IntMode (Interrupt Mode) +/* + * IntMode (Interrupt Mode) * * Valid Range: varies depending on kernel configuration & hardware support * @@ -122,7 +132,8 @@ E1000_PARAM(IntMode, "Interrupt Mode"); #define MAX_INTMODE 2 #define MIN_INTMODE 0 -/* Enable Smart Power Down of the PHY +/* + * Enable Smart Power Down of the PHY * * Valid Range: 0, 1 * @@ -130,7 +141,8 @@ E1000_PARAM(IntMode, "Interrupt Mode"); */ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); -/* Enable Kumeran Lock Loss workaround +/* + * Enable Kumeran Lock Loss workaround * * Valid Range: 0, 1 * @@ -138,7 +150,8 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); */ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); -/* Write Protect NVM +/* + * Write Protect NVM * * Valid Range: 0, 1 * @@ -146,7 +159,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); */ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); -/* Enable CRC Stripping +/* + * Enable CRC Stripping * * Valid Range: 0, 1 * @@ -337,7 +351,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) if (num_InterruptThrottleRate > 
bd) { adapter->itr = InterruptThrottleRate[bd]; - /* Make sure a message is printed for non-special + /* + * Make sure a message is printed for non-special * values. And in case of an invalid option, display * warning, use default and go through itr/itr_setting * adjustment logic below @@ -346,12 +361,14 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) e1000_validate_option(&adapter->itr, &opt, adapter)) adapter->itr = opt.def; } else { - /* If no option specified, use default value and go + /* + * If no option specified, use default value and go * through the logic below to adjust itr/itr_setting */ adapter->itr = opt.def; - /* Make sure a message is printed for non-special + /* + * Make sure a message is printed for non-special * default values */ if (adapter->itr > 4) @@ -383,7 +400,8 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) opt.name); break; default: - /* Save the setting, because the dynamic bits + /* + * Save the setting, because the dynamic bits * change itr. * * Clear the lower two bits because diff --git a/trunk/drivers/net/ethernet/intel/e1000e/phy.c b/trunk/drivers/net/ethernet/intel/e1000e/phy.c index 28b38ff37e84..fc62a3f3a5be 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/phy.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/phy.c @@ -193,7 +193,8 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) return -E1000_ERR_PARAM; } - /* Set up Op-code, Phy Address, and register offset in the MDI + /* + * Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ @@ -203,7 +204,8 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) ew32(MDIC, mdic); - /* Poll the ready bit to see if the MDI read completed + /* + * Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ @@ -223,7 +225,8 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) } *data = (u16) mdic; - /* Allow some time after each MDIC transaction to avoid + /* + * Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) @@ -250,7 +253,8 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) return -E1000_ERR_PARAM; } - /* Set up Op-code, Phy Address, and register offset in the MDI + /* + * Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ @@ -261,7 +265,8 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) ew32(MDIC, mdic); - /* Poll the ready bit to see if the MDI read completed + /* + * Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ @@ -280,7 +285,8 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) return -E1000_ERR_PHY; } - /* Allow some time after each MDIC transaction to avoid + /* + * Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. 
*/ if (hw->mac.type == e1000_pch2lan) @@ -702,7 +708,8 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) if (ret_val) return ret_val; phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; - /* Options: + /* + * Options: * 0 - Auto (default) * 1 - MDI mode * 2 - MDI-X mode @@ -747,7 +754,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if (phy->type != e1000_phy_bm) phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - /* Options: + /* + * Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode @@ -772,7 +780,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) break; } - /* Options: + /* + * Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled @@ -809,7 +818,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) if ((phy->type == e1000_phy_m88) && (phy->revision < E1000_REVISION_4) && (phy->id != BME1000_E_PHY_ID_R2)) { - /* Force TX_CLK in the Extended PHY Specific Control Register + /* + * Force TX_CLK in the Extended PHY Specific Control Register * to 25MHz clock. */ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); @@ -889,7 +899,8 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) return ret_val; } - /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. */ msleep(100); @@ -925,7 +936,8 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { - /* when autonegotiation advertisement is only 1000Mbps then we + /* + * when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. */ @@ -989,14 +1001,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) return ret_val; } - /* Need to parse both autoneg_advertised and fc and set up + /* + * Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ - /* First we clear all the 10/100 mb speed bits in the Auto-Neg + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ @@ -1042,7 +1056,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } - /* Check for a software override of the flow control settings, and + /* + * Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation @@ -1061,13 +1076,15 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ switch (hw->fc.current_mode) { case e1000_fc_none: - /* Flow control (Rx & Tx) is completely disabled by a + /* + * Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_rx_pause: - /* Rx Flow control is enabled, and Tx Flow control is + /* + * Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. 
* * Since there really isn't a way to advertise that we are @@ -1079,14 +1096,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_tx_pause: - /* Tx Flow control is enabled, and Rx Flow control is + /* + * Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; break; case e1000_fc_full: - /* Flow control (both Rx and Tx) is enabled by a software + /* + * Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); @@ -1123,12 +1142,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) s32 ret_val; u16 phy_ctrl; - /* Perform some bounds checking on the autoneg advertisement + /* + * Perform some bounds checking on the autoneg advertisement * parameter. */ phy->autoneg_advertised &= phy->autoneg_mask; - /* If autoneg_advertised is zero, we assume it was not defaulted + /* + * If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ if (!phy->autoneg_advertised) @@ -1142,7 +1163,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) } e_dbg("Restarting Auto-Neg\n"); - /* Restart auto-negotiation by setting the Auto Neg Enable bit and + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and * the Auto Neg Restart bit in the PHY control register. */ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); @@ -1154,7 +1176,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Does the user want to wait for Auto-Neg to complete here, or + /* + * Does the user want to wait for Auto-Neg to complete here, or * check at a later time (for example, callback routine). */ if (phy->autoneg_wait_to_complete) { @@ -1185,14 +1208,16 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) bool link; if (hw->mac.autoneg) { - /* Setup autoneg and flow control advertisement and perform + /* + * Setup autoneg and flow control advertisement and perform * autonegotiation. */ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) return ret_val; } else { - /* PHY will be set to 10H, 10F, 100H or 100F + /* + * PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ e_dbg("Forcing Speed and Duplex\n"); @@ -1203,7 +1228,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) } } - /* Check link status. Wait up to 100 microseconds for link to become + /* + * Check link status. Wait up to 100 microseconds for link to become * valid. */ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, @@ -1247,7 +1273,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI * forced whenever speed and duplex are forced. */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); @@ -1301,7 +1328,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) u16 phy_data; bool link; - /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. 
*/ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -1342,7 +1370,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (hw->phy.type != e1000_phy_m88) { e_dbg("Link taking longer than expected.\n"); } else { - /* We didn't get link. + /* + * We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, @@ -1369,7 +1398,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Resetting the phy means we need to re-force TX_CLK in the + /* + * Resetting the phy means we need to re-force TX_CLK in the * Extended PHY Specific Control Register to 25MHz clock from * the reset value of 2.5MHz. */ @@ -1378,7 +1408,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - /* In addition, we must re-enable CRS on Tx for both half and full + /* + * In addition, we must re-enable CRS on Tx for both half and full * duplex. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); @@ -1542,7 +1573,8 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; - /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. @@ -1670,7 +1702,8 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) s32 ret_val; u16 data, offset, mask; - /* Polarity is determined based on the speed of + /* + * Polarity is determined based on the speed of * our connection. */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); @@ -1682,7 +1715,8 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) offset = IGP01E1000_PHY_PCS_INIT_REG; mask = IGP01E1000_PHY_POLARITY_MASK; } else { - /* This really only applies to 10Mbps since + /* + * This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). */ offset = IGP01E1000_PHY_PORT_STATUS; @@ -1711,7 +1745,8 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) s32 ret_val; u16 phy_data, offset, mask; - /* Polarity is determined based on the reversal feature being enabled. + /* + * Polarity is determined based on the reversal feature being enabled. */ if (phy->polarity_correction) { offset = IFE_PHY_EXTENDED_STATUS_CONTROL; @@ -1756,7 +1791,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) msleep(100); } - /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + /* + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation * has completed. */ return ret_val; @@ -1778,13 +1814,15 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u16 i, phy_status; for (i = 0; i < iterations; i++) { - /* Some PHYs require the PHY_STATUS register to be read + /* + * Some PHYs require the PHY_STATUS register to be read * twice due to the link bit being sticky. No harm doing * it across the board. */ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) - /* If the first read fails, another entity may have + /* + * If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. 
*/ @@ -1875,7 +1913,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) if (ret_val) return ret_val; - /* Getting bits 15:9, which represent the combination of + /* + * Getting bits 15:9, which represent the combination of * coarse and fine gain values. The result is a number * that can be put into the lookup table to obtain the * approximate cable length. @@ -2246,13 +2285,15 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) e1e_wphy(hw, 0x1796, 0x0008); /* Change cg_icount + enable integbp for channels BCD */ e1e_wphy(hw, 0x1798, 0xD008); - /* Change cg_icount + enable integbp + change prop_factor_master + /* + * Change cg_icount + enable integbp + change prop_factor_master * to 8 for channel A */ e1e_wphy(hw, 0x1898, 0xD918); /* Disable AHT in Slave mode on channel A */ e1e_wphy(hw, 0x187A, 0x0800); - /* Enable LPLU and disable AN to 1000 in non-D0a states, + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, * Enable SPD+B2B */ e1e_wphy(hw, 0x0019, 0x008D); @@ -2376,7 +2417,8 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw) e1000e_get_phy_id(hw); phy_type = e1000e_get_phy_type_from_id(hw->phy.id); - /* If phy_type is valid, break - we found our + /* + * If phy_type is valid, break - we found our * PHY address */ if (phy_type != e1000_phy_unknown) @@ -2436,7 +2478,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; - /* Page select is register 31 for phy address 1 and 22 for + /* + * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ @@ -2494,7 +2537,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; - /* Page select is register 31 for phy address 1 and 22 for + /* + * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ @@ -2639,7 +2683,8 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) return ret_val; } - /* Enable both PHY wakeup mode and Wakeup register page writes. + /* + * Enable both PHY wakeup mode and Wakeup register page writes. * Prevent a power state change by disabling ME and Host PHY wakeup. */ temp = *phy_reg; @@ -2653,7 +2698,8 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) return ret_val; } - /* Select Host Wakeup Registers page - caller now able to write + /* + * Select Host Wakeup Registers page - caller now able to write * registers on the Wakeup registers page */ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); @@ -2992,7 +3038,8 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if (page == HV_INTC_FC_PAGE_START) page = 0; - /* Workaround MDIO accesses being disabled after entering IEEE + /* + * Workaround MDIO accesses being disabled after entering IEEE * Power Down (when bit 11 of the PHY Control register is set) */ if ((hw->phy.type == e1000_phy_82578) && diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c index fdaaf2709d0a..8c12dbd0d6ce 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1028,15 +1028,6 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) * continue to check for link. 
*/ hw->mac.get_link_status = !hw->mac.serdes_has_link; - - /* Configure Flow Control now that Auto-Neg has completed. - * First, we need to restore the desired flow control - * settings because we may have had to re-autoneg with a - * different link partner. - */ - ret_val = igb_config_fc_after_link_up(hw); - if (ret_val) - hw_dbg("Error configuring flow control\n"); } else { ret_val = igb_check_for_copper_link(hw); } @@ -1354,7 +1345,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) **/ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) { - u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + u32 ctrl_ext, ctrl_reg, reg; bool pcs_autoneg; s32 ret_val = E1000_SUCCESS; u16 data; @@ -1442,45 +1433,27 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + /* + * We force flow control to prevent the CTRL register values from being + * overwritten by the autonegotiated flow control values + */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + if (pcs_autoneg) { /* Set PCS register for autoneg */ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ - - /* Disable force flow control for autoneg */ - reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; - - /* Configure flow control advertisement for autoneg */ - anadv_reg = rd32(E1000_PCS_ANADV); - anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); - switch (hw->fc.requested_mode) { - case e1000_fc_full: - case e1000_fc_rx_pause: - anadv_reg |= E1000_TXCW_ASM_DIR; - anadv_reg |= E1000_TXCW_PAUSE; - break; - case e1000_fc_tx_pause: - anadv_reg |= E1000_TXCW_ASM_DIR; - break; - default: - break; - } - wr32(E1000_PCS_ANADV, anadv_reg); - hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { /* Set PCS register for forced link */ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ - /* Force flow control for forced link */ - reg |= E1000_PCS_LCTL_FORCE_FCTRL; - hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } wr32(E1000_PCS_LCTL, reg); - if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + if (!igb_sgmii_active_82575(hw)) igb_force_mac_fc(hw); return ret_val; @@ -1954,12 +1927,6 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) hw->dev_spec._82575.global_device_reset = false; - /* due to hw errata, global device reset doesn't always - * work on 82580 - */ - if (hw->mac.type == e1000_82580) - global_device_reset = false; - /* Get current control state. 
*/ ctrl = rd32(E1000_CTRL); diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_defines.h b/trunk/drivers/net/ethernet/intel/igb/e1000_defines.h index 45dce06eff26..198d14848820 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -431,10 +431,6 @@ #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 #define FLOW_CONTROL_TYPE 0x8808 -/* Transmit Config Word */ -#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ -#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ - /* 802.1q VLAN Packet Size */ #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ @@ -543,9 +539,6 @@ /* mPHY Near End Digital Loopback Override Bit */ #define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 -#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 -#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 - /* PHY Control Register */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_i210.c b/trunk/drivers/net/ethernet/intel/igb/e1000_i210.c index fbcdbebb0b5f..41474298d365 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -35,42 +35,11 @@ #include "e1000_hw.h" #include "e1000_i210.h" -/** - * igb_get_hw_semaphore_i210 - Acquire hardware semaphore - * @hw: pointer to the HW structure - * - * Acquire the HW semaphore to access the PHY or NVM - */ -static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) -{ - u32 swsm; - s32 ret_val = E1000_SUCCESS; - s32 timeout = hw->nvm.word_size + 1; - s32 i = 0; - - /* Get the FW semaphore. 
*/ - for (i = 0; i < timeout; i++) { - swsm = rd32(E1000_SWSM); - wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); - - /* Semaphore acquired if bit latched */ - if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) - break; - - udelay(50); - } - - if (i == timeout) { - /* Release semaphores */ - igb_put_hw_semaphore(hw); - hw_dbg("Driver can't access the NVM\n"); - ret_val = -E1000_ERR_NVM; - goto out; - } - -out: - return ret_val; -} +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw); +static void igb_put_hw_semaphore_i210(struct e1000_hw *hw); +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw); /** * igb_acquire_nvm_i210 - Request for access to EEPROM @@ -98,23 +67,6 @@ void igb_release_nvm_i210(struct e1000_hw *hw) igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); } -/** - * igb_put_hw_semaphore_i210 - Release hardware semaphore - * @hw: pointer to the HW structure - * - * Release hardware semaphore used to access the PHY or NVM - */ -static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) -{ - u32 swsm; - - swsm = rd32(E1000_SWSM); - - swsm &= ~E1000_SWSM_SWESMBI; - - wr32(E1000_SWSM, swsm); -} - /** * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore * @hw: pointer to the HW structure @@ -185,6 +137,60 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) igb_put_hw_semaphore_i210(hw); } +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = E1000_SUCCESS; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore_i210 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~E1000_SWSM_SWESMBI; + + wr32(E1000_SWSM, swsm); +} + /** * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register * @hw: pointer to the HW structure @@ -222,6 +228,49 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, return status; } +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. 
+ **/ +s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + /** * igb_write_nvm_srwr - Write to Shadow Ram using EEWR * @hw: pointer to the HW structure @@ -279,50 +328,6 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, return ret_val; } -/** - * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR - * @hw: pointer to the HW structure - * @offset: offset within the Shadow RAM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the Shadow RAM - * - * Writes data to Shadow RAM at offset using EEWR register. - * - * If e1000_update_nvm_checksum is not called after this function , the - * data will not be committed to FLASH and also Shadow RAM will most likely - * contain an invalid checksum. - * - * If error code is returned, data and Shadow RAM may be inconsistent - buffer - * partially written. - */ -s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, - u16 *data) -{ - s32 status = E1000_SUCCESS; - u16 i, count; - - /* We cannot hold synchronization semaphores for too long, - * because of forceful takeover procedure. However it is more efficient - * to write in bursts than synchronizing access for each word. - */ - for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { - count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
- E1000_EERD_EEWR_MAX_COUNT : (words - i); - if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { - status = igb_write_nvm_srwr(hw, offset, count, - data + i); - hw->nvm.ops.release(hw); - } else { - status = E1000_ERR_SWFW_SYNC; - } - - if (status != E1000_SUCCESS) - break; - } - - return status; -} - /** * igb_read_nvm_i211 - Read NVM wrapper function for I211 * @hw: pointer to the HW structure @@ -345,40 +350,16 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, if (ret_val != E1000_SUCCESS) hw_dbg("MAC Addr not found in iNVM\n"); break; + case NVM_ID_LED_SETTINGS: case NVM_INIT_CTRL_2: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); - if (ret_val != E1000_SUCCESS) { - *data = NVM_INIT_CTRL_2_DEFAULT_I211; - ret_val = E1000_SUCCESS; - } - break; case NVM_INIT_CTRL_4: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); - if (ret_val != E1000_SUCCESS) { - *data = NVM_INIT_CTRL_4_DEFAULT_I211; - ret_val = E1000_SUCCESS; - } - break; case NVM_LED_1_CFG: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); - if (ret_val != E1000_SUCCESS) { - *data = NVM_LED_1_CFG_DEFAULT_I211; - ret_val = E1000_SUCCESS; - } - break; case NVM_LED_0_2_CFG: igb_read_invm_i211(hw, offset, data); - if (ret_val != E1000_SUCCESS) { - *data = NVM_LED_0_2_CFG_DEFAULT_I211; - ret_val = E1000_SUCCESS; - } break; - case NVM_ID_LED_SETTINGS: - ret_val = igb_read_invm_i211(hw, (u8)offset, data); - if (ret_val != E1000_SUCCESS) { - *data = ID_LED_RESERVED_FFFF; - ret_val = E1000_SUCCESS; - } + case NVM_COMPAT: + *data = ID_LED_DEFAULT_I210; + break; case NVM_SUB_DEV_ID: *data = hw->subsystem_device_id; break; @@ -631,28 +612,6 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) return ret_val; } -/** - * igb_pool_flash_update_done_i210 - Pool FLUDONE status. - * @hw: pointer to the HW structure - * - */ -static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) -{ - s32 ret_val = -E1000_ERR_NVM; - u32 i, reg; - - for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { - reg = rd32(E1000_EECD); - if (reg & E1000_EECD_FLUDONE_I210) { - ret_val = E1000_SUCCESS; - break; - } - udelay(5); - } - - return ret_val; -} - /** * igb_update_flash_i210 - Commit EEPROM to the flash * @hw: pointer to the HW structure @@ -682,6 +641,28 @@ s32 igb_update_flash_i210(struct e1000_hw *hw) return ret_val; } +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. 
+ * @hw: pointer to the HW structure + * + **/ +s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + udelay(5); + } + + return ret_val; +} + /** * igb_valid_led_default_i210 - Verify a valid default LED config * @hw: pointer to the HW structure diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_i210.h b/trunk/drivers/net/ethernet/intel/igb/e1000_i210.h index 1c89358a99ab..974d23584d70 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -84,10 +84,4 @@ enum E1000_INVM_STRUCTURE_TYPE { (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) -/* NVM offset defaults for i211 device */ -#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 -#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 -#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 -#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C - #endif diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_mac.c b/trunk/drivers/net/ethernet/intel/igb/e1000_mac.c index 101e6e4da97f..7acddfe9e6d5 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -839,7 +839,6 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val = 0; - u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; u16 speed, duplex; @@ -1041,129 +1040,6 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) goto out; } } - /* Check for the case where we have SerDes media and auto-neg is - * enabled. In this case, we need to check and see if Auto-Neg - * has completed, and if so, how the PHY and link partner has - * flow control configured. - */ - if ((hw->phy.media_type == e1000_media_type_internal_serdes) - && mac->autoneg) { - /* Read the PCS_LSTS and check to see if AutoNeg - * has completed. - */ - pcs_status_reg = rd32(E1000_PCS_LSTAT); - - if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { - hw_dbg("PCS Auto Neg has not completed.\n"); - return ret_val; - } - - /* The AutoNeg process has completed, so we now need to - * read both the Auto Negotiation Advertisement - * Register (PCS_ANADV) and the Auto_Negotiation Base - * Page Ability Register (PCS_LPAB) to determine how - * flow control was negotiated. - */ - pcs_adv_reg = rd32(E1000_PCS_ANADV); - pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); - - /* Two bits in the Auto Negotiation Advertisement Register - * (PCS_ANADV) and two bits in the Auto Negotiation Base - * Page Ability Register (PCS_LPAB) determine flow control - * for both the PHY and the link partner. The following - * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, - * 1999, describes these PAUSE resolution bits and how flow - * control is determined based upon these settings. - * NOTE: DC = Don't Care - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution - *-------|---------|-------|---------|-------------------- - * 0 | 0 | DC | DC | e1000_fc_none - * 0 | 1 | 0 | DC | e1000_fc_none - * 0 | 1 | 1 | 0 | e1000_fc_none - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - * 1 | 0 | 0 | DC | e1000_fc_none - * 1 | DC | 1 | DC | e1000_fc_full - * 1 | 1 | 0 | 0 | e1000_fc_none - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - * - * Are both PAUSE bits set to 1? 
If so, this implies - * Symmetric Flow Control is enabled at both ends. The - * ASM_DIR bits are irrelevant per the spec. - * - * For Symmetric Flow Control: - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | DC | 1 | DC | e1000_fc_full - * - */ - if ((pcs_adv_reg & E1000_TXCW_PAUSE) && - (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { - /* Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise Rx - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == e1000_fc_full) { - hw->fc.current_mode = e1000_fc_full; - hw_dbg("Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = Rx PAUSE frames only.\n"); - } - } - /* For receiving PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 0 | 1 | 1 | 1 | e1000_fc_tx_pause - */ - else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && - (pcs_adv_reg & E1000_TXCW_ASM_DIR) && - (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && - (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg("Flow Control = Tx PAUSE frames only.\n"); - } - /* For transmitting PAUSE frames ONLY. - * - * LOCAL DEVICE | LINK PARTNER - * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result - *-------|---------|-------|---------|-------------------- - * 1 | 1 | 0 | 1 | e1000_fc_rx_pause - */ - else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && - (pcs_adv_reg & E1000_TXCW_ASM_DIR) && - !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && - (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { - hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = Rx PAUSE frames only.\n"); - } else { - /* Per the IEEE spec, at this point flow control - * should be disabled. - */ - hw->fc.current_mode = e1000_fc_none; - hw_dbg("Flow Control = NONE.\n"); - } - - /* Now we call a subroutine to actually force the MAC - * controller to use the correct flow control settings. 
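The pause-resolution table in the removed block above boils down to a small decision function. A minimal standalone sketch of that logic (resolve_fc, enum fc_mode and requested_full are illustrative names only, not identifiers from the driver; requested_full stands in for hw->fc.requested_mode == e1000_fc_full):

#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve the negotiated flow-control mode from the PAUSE/ASM_DIR bits
 * advertised locally (adv_*) and by the link partner (lpa_*), following
 * the IEEE 802.3 pause-resolution table quoted in the comment above. */
enum fc_mode resolve_fc(bool adv_pause, bool adv_asm,
			bool lpa_pause, bool lpa_asm,
			bool requested_full)
{
	if (adv_pause && lpa_pause)
		/* both ends advertise symmetric PAUSE; honour an Rx-only request */
		return requested_full ? FC_FULL : FC_RX_PAUSE;
	if (!adv_pause && adv_asm && lpa_pause && lpa_asm)
		return FC_TX_PAUSE;
	if (adv_pause && adv_asm && !lpa_pause && lpa_asm)
		return FC_RX_PAUSE;
	return FC_NONE;
}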
- */ - pcs_ctrl_reg = rd32(E1000_PCS_LCTL); - pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; - wr32(E1000_PCS_LCTL, pcs_ctrl_reg); - - ret_val = igb_force_mac_fc(hw); - if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); - return ret_val; - } - } out: return ret_val; diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_nvm.c b/trunk/drivers/net/ethernet/intel/igb/e1000_nvm.c index fbb7604db364..7db3f80bcd57 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -438,7 +438,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val = -E1000_ERR_NVM; + s32 ret_val; u16 widx = 0; /* @@ -448,21 +448,22 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { hw_dbg("nvm parameter(s) out of bounds\n"); - return ret_val; + ret_val = -E1000_ERR_NVM; + goto out; } + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + msleep(10); + while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; - ret_val = nvm->ops.acquire(hw); - if (ret_val) - return ret_val; - ret_val = igb_ready_nvm_eeprom(hw); - if (ret_val) { - nvm->ops.release(hw); - return ret_val; - } + if (ret_val) + goto release; igb_standby_nvm(hw); @@ -496,10 +497,13 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; } } - usleep_range(1000, 2000); - nvm->ops.release(hw); } + msleep(10); +release: + hw->nvm.ops.release(hw); + +out: return ret_val; } diff --git a/trunk/drivers/net/ethernet/intel/igb/igb.h b/trunk/drivers/net/ethernet/intel/igb/igb.h index 17f1686ee411..c15a4811b476 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb.h +++ b/trunk/drivers/net/ethernet/intel/igb/igb.h @@ -42,8 +42,6 @@ struct igb_adapter; -#define E1000_PCS_CFG_IGN_SD 1 - /* Interrupt defines */ #define IGB_START_ITR 648 /* ~6000 ints/sec */ #define IGB_4K_ITR 980 diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c index bfe9208c4b18..e2288b5a9caa 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1624,20 +1624,6 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) reg &= ~E1000_CONNSW_ENRGSRC; wr32(E1000_CONNSW, reg); - /* Unset sigdetect for SERDES loopback on - * 82580 and i350 devices. 
- */ - switch (hw->mac.type) { - case e1000_82580: - case e1000_i350: - reg = rd32(E1000_PCS_CFG0); - reg |= E1000_PCS_CFG_IGN_SD; - wr32(E1000_PCS_CFG0, reg); - break; - default: - break; - } - /* Set PCS register for forced speed */ reg = rd32(E1000_PCS_LCTL); reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 50aa546b8c7a..efaf9a73cc79 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -47,27 +47,23 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char *buf; + char buf[256]; + int bytes_not_copied; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_reg_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_reg_ops_buf); + if (count < len) return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; - kfree(buf); + *ppos = len; return len; } @@ -83,7 +79,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int len; + int bytes_not_copied; /* don't allow partial writes */ if (*ppos != 0) @@ -91,15 +87,14 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) return -ENOSPC; - len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, - sizeof(ixgbe_dbg_reg_ops_buf)-1, - ppos, - buffer, - count); - if (len < 0) - return len; - - ixgbe_dbg_reg_ops_buf[len] = '\0'; + bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_reg_ops_buf[count] = '\0'; if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { u32 reg, value; @@ -152,27 +147,23 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char *buf; + char buf[256]; + int bytes_not_copied; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); + if (count < len) return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; - kfree(buf); + *ppos = len; return len; } @@ -188,7 +179,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int len; + int bytes_not_copied; /* don't allow partial writes */ if (*ppos != 0) @@ -196,15 +187,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) return -ENOSPC; - len = 
simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, - sizeof(ixgbe_dbg_netdev_ops_buf)-1, - ppos, - buffer, - count); - if (len < 0) - return len; - - ixgbe_dbg_netdev_ops_buf[len] = '\0'; + bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, + buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_netdev_ops_buf[count] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 79b834583188..484bbedffe2a 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1338,29 +1338,26 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, if (hlen < sizeof(struct iphdr)) return hdr.network - data; - /* record next protocol if header is present */ - if (!hdr.ipv4->frag_off) - nexthdr = hdr.ipv4->protocol; + /* record next protocol */ + nexthdr = hdr.ipv4->protocol; + hdr.network += hlen; } else if (protocol == __constant_htons(ETH_P_IPV6)) { if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) return max_len; /* record next protocol */ nexthdr = hdr.ipv6->nexthdr; - hlen = sizeof(struct ipv6hdr); + hdr.network += sizeof(struct ipv6hdr); #ifdef IXGBE_FCOE } else if (protocol == __constant_htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) return max_len; - hlen = FCOE_HEADER_LEN; + hdr.network += FCOE_HEADER_LEN; #endif } else { return hdr.network - data; } - /* relocate pointer to start of L4 header */ - hdr.network += hlen; - /* finally sort out TCP/UDP */ if (nexthdr == IPPROTO_TCP) { if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) diff --git a/trunk/drivers/net/ethernet/myricom/Kconfig b/trunk/drivers/net/ethernet/myricom/Kconfig index 3932d081fa21..540f0c6fc160 100644 --- a/trunk/drivers/net/ethernet/myricom/Kconfig +++ b/trunk/drivers/net/ethernet/myricom/Kconfig @@ -23,6 +23,7 @@ config MYRI10GE depends on PCI && INET select FW_LOADER select CRC32 + select INET_LRO ---help--- This driver supports Myricom Myri-10G Dual Protocol interface in Ethernet mode. 
If the eeprom on your board is not recent enough, diff --git a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 2fc984a03771..83516e3369c9 100644 --- a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -95,6 +96,8 @@ MODULE_LICENSE("Dual BSD/GPL"); #define MYRI10GE_EEPROM_STRINGS_SIZE 256 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) +#define MYRI10GE_MAX_LRO_DESCRIPTORS 8 +#define MYRI10GE_LRO_MAX_PKTS 64 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff @@ -162,6 +165,8 @@ struct myri10ge_rx_done { dma_addr_t bus; int cnt; int idx; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; }; struct myri10ge_slice_netstats { @@ -333,6 +338,11 @@ static int myri10ge_debug = -1; /* defaults above */ module_param(myri10ge_debug, int, 0); MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); +static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; +module_param(myri10ge_lro_max_pkts, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_lro_max_pkts, + "Number of LRO packets to be aggregated"); + static int myri10ge_fill_thresh = 256; module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); @@ -1187,6 +1197,36 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum) } } +static inline void +myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va, + struct skb_frag_struct *rx_frags, int len, int hlen) +{ + struct skb_frag_struct *skb_frags; + + skb->len = skb->data_len = len; + /* attach the page(s) */ + + skb_frags = skb_shinfo(skb)->frags; + while (len > 0) { + memcpy(skb_frags, rx_frags, sizeof(*skb_frags)); + len -= skb_frag_size(rx_frags); + skb_frags++; + rx_frags++; + skb_shinfo(skb)->nr_frags++; + } + + /* pskb_may_pull is not available in irq context, but + * skb_pull() (for ether_pad and eth_type_trans()) requires + * the beginning of the packet in skb_headlen(), move it + * manually */ + skb_copy_to_linear_data(skb, va, hlen); + skb_shinfo(skb)->frags[0].page_offset += hlen; + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen); + skb->data_len -= hlen; + skb->tail += hlen; + skb_pull(skb, MXGEFW_PAD); +} + static void myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, int bytes, int watchdog) @@ -1264,50 +1304,18 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, } } -/* - * GRO does not support acceleration of tagged vlan frames, and - * this NIC does not support vlan tag offload, so we must pop - * the tag ourselves to be able to achieve GRO performance that - * is comparable to LRO. 
- */ - -static inline void -myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) -{ - u8 *va; - struct vlan_ethhdr *veh; - struct skb_frag_struct *frag; - __wsum vsum; - - va = addr; - va += MXGEFW_PAD; - veh = (struct vlan_ethhdr *)va; - if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX && - veh->h_vlan_proto == ntohs(ETH_P_8021Q)) { - /* fixup csum if needed */ - if (skb->ip_summed == CHECKSUM_COMPLETE) { - vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0); - skb->csum = csum_sub(skb->csum, vsum); - } - /* pop tag */ - __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI)); - memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN); - skb->len -= VLAN_HLEN; - skb->data_len -= VLAN_HLEN; - frag = skb_shinfo(skb)->frags; - frag->page_offset += VLAN_HLEN; - skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN); - } -} +#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a + * page into an skb */ static inline int -myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) +myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, + bool lro_enabled) { struct myri10ge_priv *mgp = ss->mgp; struct sk_buff *skb; - struct skb_frag_struct *rx_frags; + struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; struct myri10ge_rx_buf *rx; - int i, idx, remainder, bytes; + int i, idx, hlen, remainder, bytes; struct pci_dev *pdev = mgp->pdev; struct net_device *dev = mgp->dev; u8 *va; @@ -1324,48 +1332,67 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) idx = rx->cnt & rx->mask; va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; prefetch(va); - - skb = napi_get_frags(&ss->napi); - if (unlikely(skb == NULL)) { - ss->stats.rx_dropped++; - for (i = 0, remainder = len; remainder > 0; i++) { - myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); - put_page(rx->info[idx].page); - rx->cnt++; - idx = rx->cnt & rx->mask; - remainder -= MYRI10GE_ALLOC_SIZE; - } - return 0; - } - rx_frags = skb_shinfo(skb)->frags; /* Fill skb_frag_struct(s) with data from our receive */ for (i = 0, remainder = len; remainder > 0; i++) { myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); - skb_fill_page_desc(skb, i, rx->info[idx].page, - rx->info[idx].page_offset, - remainder < MYRI10GE_ALLOC_SIZE ? - remainder : MYRI10GE_ALLOC_SIZE); + __skb_frag_set_page(&rx_frags[i], rx->info[idx].page); + rx_frags[i].page_offset = rx->info[idx].page_offset; + if (remainder < MYRI10GE_ALLOC_SIZE) + skb_frag_size_set(&rx_frags[i], remainder); + else + skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE); rx->cnt++; idx = rx->cnt & rx->mask; remainder -= MYRI10GE_ALLOC_SIZE; } - /* remove padding */ - rx_frags[0].page_offset += MXGEFW_PAD; - rx_frags[0].size -= MXGEFW_PAD; - len -= MXGEFW_PAD; + if (lro_enabled) { + rx_frags[0].page_offset += MXGEFW_PAD; + skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD); + len -= MXGEFW_PAD; + lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, + /* opaque, will come back in get_frag_header */ + len, len, + (void *)(__force unsigned long)csum, csum); - skb->len = len; - skb->data_len = len; - skb->truesize += len; - if (dev->features & NETIF_F_RXCSUM) { - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = csum; + return 1; + } + + hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN; + + /* allocate an skb to attach the page(s) to. 
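In the receive path above, the hardware checksum is carried through the LRO engine as the opaque priv argument of lro_receive_frags() and recovered in the get_frag_header callback further down. In isolation the round-trip is just a pair of casts; a sketch under that assumption (pack_csum and unpack_csum are illustrative helper names, not driver functions):

#include <linux/types.h>

/* Pack a __wsum into the void * cookie that net_lro_mgr hands back to
 * get_frag_header() as 'priv' (same casts as used in myri10ge_rx_done above). */
static inline void *pack_csum(__wsum csum)
{
	return (void *)(__force unsigned long)csum;
}

/* Undo the packing on the other side of the LRO engine. */
static inline __wsum unpack_csum(void *priv)
{
	return (__force __wsum)(unsigned long)priv;
}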
This is done + * after trying LRO, so as to avoid skb allocation overheads */ + + skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); + if (unlikely(skb == NULL)) { + ss->stats.rx_dropped++; + do { + i--; + __skb_frag_unref(&rx_frags[i]); + } while (i != 0); + return 0; + } + + /* Attach the pages to the skb, and trim off any padding */ + myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen); + if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) { + skb_frag_unref(skb, 0); + skb_shinfo(skb)->nr_frags = 0; + } else { + skb->truesize += bytes * skb_shinfo(skb)->nr_frags; } - myri10ge_vlan_rx(mgp->dev, va, skb); + skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, ss - &mgp->ss[0]); - napi_gro_frags(&ss->napi); + if (dev->features & NETIF_F_RXCSUM) { + if ((skb->protocol == htons(ETH_P_IP)) || + (skb->protocol == htons(ETH_P_IPV6))) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } else + myri10ge_vlan_ip_csum(skb, csum); + } + netif_receive_skb(skb); return 1; } @@ -1453,11 +1480,18 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) u16 length; __wsum checksum; + /* + * Prevent compiler from generating more than one ->features memory + * access to avoid theoretical race condition with functions that + * change NETIF_F_LRO flag at runtime. + */ + bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO); + while (rx_done->entry[idx].length != 0 && work_done < budget) { length = ntohs(rx_done->entry[idx].length); rx_done->entry[idx].length = 0; checksum = csum_unfold(rx_done->entry[idx].checksum); - rx_ok = myri10ge_rx_done(ss, length, checksum); + rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); rx_packets += rx_ok; rx_bytes += rx_ok * (unsigned long)length; cnt++; @@ -1469,6 +1503,9 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) ss->stats.rx_packets += rx_packets; ss->stats.rx_bytes += rx_bytes; + if (lro_enabled) + lro_flush_all(&rx_done->lro_mgr); + /* restock receive rings if needed */ if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) myri10ge_alloc_rx_pages(mgp, &ss->rx_small, @@ -1742,6 +1779,7 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", "wake_queue", "stop_queue", "tx_linearized", + "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc", }; #define MYRI10GE_NET_STATS_LEN 21 @@ -1842,6 +1880,14 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ss->tx.wake_queue; data[i++] = (unsigned int)ss->tx.stop_queue; data[i++] = (unsigned int)ss->tx.linearized; + data[i++] = ss->rx_done.lro_mgr.stats.aggregated; + data[i++] = ss->rx_done.lro_mgr.stats.flushed; + if (ss->rx_done.lro_mgr.stats.flushed) + data[i++] = ss->rx_done.lro_mgr.stats.aggregated / + ss->rx_done.lro_mgr.stats.flushed; + else + data[i++] = 0; + data[i++] = ss->rx_done.lro_mgr.stats.no_desc; } } @@ -2225,6 +2271,67 @@ static void myri10ge_free_irq(struct myri10ge_priv *mgp) pci_disable_msix(pdev); } +static int +myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, + u64 * hdr_flags, void *priv) +{ + struct ethhdr *eh; + struct vlan_ethhdr *veh; + struct iphdr *iph; + u8 *va = skb_frag_address(frag); + unsigned long ll_hlen; + /* passed opaque through lro_receive_frags() */ + __wsum csum = (__force __wsum) (unsigned long)priv; + + /* find the mac header, aborting if not IPv4 */ + + eh = (struct ethhdr *)va; + *mac_hdr 
= eh; + ll_hlen = ETH_HLEN; + if (eh->h_proto != htons(ETH_P_IP)) { + if (eh->h_proto == htons(ETH_P_8021Q)) { + veh = (struct vlan_ethhdr *)va; + if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -1; + + ll_hlen += VLAN_HLEN; + + /* + * HW checksum starts ETH_HLEN bytes into + * frame, so we must subtract off the VLAN + * header's checksum before csum can be used + */ + csum = csum_sub(csum, csum_partial(va + ETH_HLEN, + VLAN_HLEN, 0)); + } else { + return -1; + } + } + *hdr_flags = LRO_IPV4; + + iph = (struct iphdr *)(va + ll_hlen); + *ip_hdr = iph; + if (iph->protocol != IPPROTO_TCP) + return -1; + if (ip_is_fragment(iph)) + return -1; + *hdr_flags |= LRO_TCP; + *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); + + /* verify the IP checksum */ + if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl))) + return -1; + + /* verify the checksum */ + if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr, + ntohs(iph->tot_len) - (iph->ihl << 2), + IPPROTO_TCP, csum))) + return -1; + + return 0; +} + static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) { struct myri10ge_cmd cmd; @@ -2295,6 +2402,7 @@ static int myri10ge_open(struct net_device *dev) struct myri10ge_cmd cmd; int i, status, big_pow2, slice; u8 *itable; + struct net_lro_mgr *lro_mgr; if (mgp->running != MYRI10GE_ETH_STOPPED) return -EBUSY; @@ -2405,6 +2513,19 @@ static int myri10ge_open(struct net_device *dev) goto abort_with_rings; } + lro_mgr = &ss->rx_done.lro_mgr; + lro_mgr->dev = dev; + lro_mgr->features = LRO_F_NAPI; + lro_mgr->ip_summed = CHECKSUM_COMPLETE; + lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; + lro_mgr->lro_arr = ss->rx_done.lro_desc; + lro_mgr->get_frag_header = myri10ge_get_frag_header; + lro_mgr->max_aggr = myri10ge_lro_max_pkts; + lro_mgr->frag_align_pad = 2; + if (lro_mgr->max_aggr > MAX_SKB_FRAGS) + lro_mgr->max_aggr = MAX_SKB_FRAGS; + /* must happen prior to any irq */ napi_enable(&(ss)->napi); } @@ -3022,6 +3143,15 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr) return 0; } +static netdev_features_t myri10ge_fix_features(struct net_device *dev, + netdev_features_t features) +{ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) { struct myri10ge_priv *mgp = netdev_priv(dev); @@ -3748,6 +3878,7 @@ static const struct net_device_ops myri10ge_netdev_ops = { .ndo_get_stats64 = myri10ge_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = myri10ge_change_mtu, + .ndo_fix_features = myri10ge_fix_features, .ndo_set_rx_mode = myri10ge_set_multicast_list, .ndo_set_mac_address = myri10ge_set_mac_address, }; @@ -3887,11 +4018,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &myri10ge_netdev_ops; netdev->mtu = myri10ge_initial_mtu; - netdev->hw_features = mgp->features | NETIF_F_RXCSUM; - - /* fake NETIF_F_HW_VLAN_RX for good GRO performance */ - netdev->hw_features |= NETIF_F_HW_VLAN_RX; - + netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; netdev->features = netdev->hw_features; if (dac_enabled) diff --git a/trunk/drivers/net/ethernet/sfc/ethtool.c b/trunk/drivers/net/ethernet/sfc/ethtool.c index 90f078eff8e6..8e61cd06f66a 100644 --- a/trunk/drivers/net/ethernet/sfc/ethtool.c +++ b/trunk/drivers/net/ethernet/sfc/ethtool.c @@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) /* MAC 
address mask including only MC flag */ static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define PORT_FULL_MASK ((__force __be16)~0) + static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { @@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); - ip_mask->ip4src = ~0; - ip_mask->psrc = ~0; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + ip_mask->psrc = PORT_FULL_MASK; } rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; - ip_mask->ip4dst = ~0; - ip_mask->pdst = ~0; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + ip_mask->pdst = PORT_FULL_MASK; return rc; } @@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, /* Check for unsupported extensions */ if ((rule->flow_type & FLOW_EXT) && - (rule->m_ext.vlan_etype | rule->m_ext.data[0] | + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || rule->m_ext.data[1])) return -EINVAL; @@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ - if ((__force u32)~ip_mask->ip4dst | - (__force u16)~ip_mask->pdst) + if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK && + ip_mask->pdst == PORT_FULL_MASK)) return -EINVAL; /* all or none of source, */ - if ((ip_mask->ip4src | ip_mask->psrc) && - ((__force u32)~ip_mask->ip4src | - (__force u16)~ip_mask->psrc)) + if ((ip_mask->ip4src || ip_mask->psrc) && + !(ip_mask->ip4src == IP4_ADDR_FULL_MASK && + ip_mask->psrc == PORT_FULL_MASK)) return -EINVAL; /* and nothing else */ - if (ip_mask->tos | rule->m_ext.vlan_tci) + if (ip_mask->tos || rule->m_ext.vlan_tci) return -EINVAL; if (ip_mask->ip4src) diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..7fdeb528133d 100644 --- a/trunk/drivers/net/hyperv/rndis_filter.c +++ b/trunk/drivers/net/hyperv/rndis_filter.c @@ -605,11 +605,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) return -EBUSY; } else { set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", - set_complete->status); + if (set_complete->status != RNDIS_STATUS_SUCCESS) ret = -EINVAL; - } } cleanup: diff --git a/trunk/drivers/net/usb/smsc75xx.c b/trunk/drivers/net/usb/smsc75xx.c index 1cbd936bb028..18238060f1c0 100644 --- a/trunk/drivers/net/usb/smsc75xx.c +++ b/trunk/drivers/net/usb/smsc75xx.c @@ -64,6 +64,15 @@ #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) +#define check_warn(ret, fmt, args...) \ + ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) + +#define check_warn_return(ret, fmt, args...) \ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) + +#define check_warn_goto_done(ret, fmt, args...) 
\ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } }) + struct smsc75xx_priv { struct usbnet *dev; u32 rfe_ctl; @@ -173,10 +182,7 @@ static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_ACCESS\n"); - return ret; - } + check_warn_return(ret, "Error reading MII_ACCESS\n"); if (!(val & MII_ACCESS_BUSY)) return 0; @@ -196,10 +202,7 @@ static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx, /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n"); - goto done; - } + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read\n"); /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; @@ -208,22 +211,13 @@ static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx, | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_READ | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ACCESS\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_ACCESS\n"); ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); - goto done; - } + check_warn_goto_done(ret, "Timed out reading MII reg %02X\n", idx); ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading MII_DATA\n"); ret = (u16)(val & 0xFFFF); @@ -243,17 +237,11 @@ static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id, /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n"); - goto done; - } + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write\n"); val = regval; ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_DATA\n"); /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; @@ -262,16 +250,10 @@ static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id, | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_WRITE | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ACCESS\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_ACCESS\n"); ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); - goto done; - } + check_warn_goto_done(ret, "Timed out writing MII reg %02X\n", idx); done: mutex_unlock(&dev->phy_mutex); @@ -308,10 +290,7 @@ static int smsc75xx_wait_eeprom(struct usbnet *dev) do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error reading E2P_CMD\n"); if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) break; @@ -334,10 +313,7 @@ static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); - if (ret < 0) { - netdev_warn(dev->net, 
"Error reading E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error reading E2P_CMD\n"); if (!(val & E2P_CMD_BUSY)) return 0; @@ -365,20 +341,14 @@ static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, for (i = 0; i < length; i++) { val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_CMD\n"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc75xx_read_reg(dev, E2P_DATA, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading E2P_DATA\n"); - return ret; - } + check_warn_return(ret, "Error reading E2P_DATA\n"); data[i] = val & 0xFF; offset++; @@ -403,10 +373,7 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, /* Issue write/erase enable command */ val = E2P_CMD_BUSY | E2P_CMD_EWEN; ret = smsc75xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_CMD\n"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) @@ -417,18 +384,12 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, /* Fill data register */ val = data[i]; ret = smsc75xx_write_reg(dev, E2P_DATA, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_DATA\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_DATA\n"); /* Send "write" command */ val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_CMD\n"); ret = smsc75xx_wait_eeprom(dev); if (ret < 0) @@ -447,10 +408,7 @@ static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) for (i = 0; i < 100; i++) { u32 dp_sel; ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); - if (ret < 0) { - netdev_warn(dev->net, "Error reading DP_SEL\n"); - return ret; - } + check_warn_return(ret, "Error reading DP_SEL\n"); if (dp_sel & DP_SEL_DPRDY) return 0; @@ -473,49 +431,28 @@ static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, mutex_lock(&pdata->dataport_mutex); ret = smsc75xx_dataport_wait_not_busy(dev); - if (ret < 0) { - netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n"); - goto done; - } + check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry\n"); ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); - if (ret < 0) { - netdev_warn(dev->net, "Error reading DP_SEL\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading DP_SEL\n"); dp_sel &= ~DP_SEL_RSEL; dp_sel |= ram_select; ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); - if (ret < 0) { - netdev_warn(dev->net, "Error writing DP_SEL\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing DP_SEL\n"); for (i = 0; i < length; i++) { ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); - if (ret < 0) { - netdev_warn(dev->net, "Error writing DP_ADDR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing DP_ADDR\n"); ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing DP_DATA\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing DP_DATA\n"); ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); - if (ret < 0) { - netdev_warn(dev->net, "Error writing DP_CMD\n"); - goto done; - } + check_warn_goto_done(ret, "Error 
writing DP_CMD\n"); ret = smsc75xx_dataport_wait_not_busy(dev); - if (ret < 0) { - netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n"); - goto done; - } + check_warn_goto_done(ret, "smsc75xx_dataport_write timeout\n"); } done: @@ -543,8 +480,7 @@ static void smsc75xx_deferred_multicast_write(struct work_struct *param) DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) - netdev_warn(dev->net, "Error writing RFE_CRL\n"); + check_warn(ret, "Error writing RFE_CRL\n"); } static void smsc75xx_set_multicast(struct net_device *netdev) @@ -618,16 +554,10 @@ static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, } ret = smsc75xx_write_reg(dev, FLOW, flow); - if (ret < 0) { - netdev_warn(dev->net, "Error writing FLOW\n"); - return ret; - } + check_warn_return(ret, "Error writing FLOW\n"); ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); - if (ret < 0) { - netdev_warn(dev->net, "Error writing FCT_FLOW\n"); - return ret; - } + check_warn_return(ret, "Error writing FCT_FLOW\n"); return 0; } @@ -644,10 +574,7 @@ static int smsc75xx_link_reset(struct usbnet *dev) PHY_INT_SRC_CLEAR_ALL); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); - if (ret < 0) { - netdev_warn(dev->net, "Error writing INT_STS\n"); - return ret; - } + check_warn_return(ret, "Error writing INT_STS\n"); mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); @@ -731,10 +658,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net, pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); - if (ret < 0) - netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret); + check_warn_return(ret, "device_set_wakeup_enable error %d\n", ret); - return ret; + return 0; } static const struct ethtool_ops smsc75xx_ethtool_ops = { @@ -787,29 +713,19 @@ static int smsc75xx_set_mac_address(struct usbnet *dev) u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write RX_ADDRH: %d\n", ret); ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write RX_ADDRL: %d\n", ret); addr_hi |= ADDR_FILTX_FB_VALID; ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write ADDR_FILTX: %d\n", ret); ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); - if (ret < 0) - netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret); + check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d\n", ret); - return ret; + return 0; } static int smsc75xx_phy_initialize(struct usbnet *dev) @@ -831,10 +747,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) do { msleep(10); bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); - if (bmcr < 0) { - netdev_warn(dev->net, "Error reading MII_BMCR\n"); - return bmcr; - } + check_warn_return(bmcr, "Error reading MII_BMCR\n"); timeout++; } while ((bmcr & BMCR_RESET) && (timeout < 100)); @@ -851,11 +764,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) /* read and write to clear phy interrupt status */ ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, 
PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); - return ret; - } - + check_warn_return(ret, "Error reading PHY_INT_SRC\n"); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, @@ -873,20 +782,14 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) bool rxenabled; ret = smsc75xx_read_reg(dev, MAC_RX, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read MAC_RX: %d\n", ret); rxenabled = ((buf & MAC_RX_RXEN) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret); } /* add 4 to size for FCS */ @@ -894,18 +797,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); ret = smsc75xx_write_reg(dev, MAC_RX, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret); if (rxenabled) { buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret); } return 0; @@ -916,10 +813,7 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) struct usbnet *dev = netdev_priv(netdev); int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); - if (ret < 0) { - netdev_warn(dev->net, "Failed to set mac rx frame length\n"); - return ret; - } + check_warn_return(ret, "Failed to set mac rx frame length\n"); return usbnet_change_mtu(netdev, new_mtu); } @@ -944,10 +838,9 @@ static int smsc75xx_set_features(struct net_device *netdev, /* it's racing here! 
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) - netdev_warn(dev->net, "Error writing RFE_CTL\n"); + check_warn_return(ret, "Error writing RFE_CTL\n"); - return ret; + return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) @@ -960,10 +853,7 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret); if (buf & PMT_CTL_DEV_RDY) return 0; @@ -985,33 +875,21 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n"); ret = smsc75xx_wait_ready(dev, 0); - if (ret < 0) { - netdev_warn(dev->net, "device not ready in smsc75xx_reset\n"); - return ret; - } + check_warn_return(ret, "device not ready in smsc75xx_reset\n"); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); buf |= HW_CFG_LRST; ret = smsc75xx_write_reg(dev, HW_CFG, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret); timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); timeout++; } while ((buf & HW_CFG_LRST) && (timeout < 100)); @@ -1023,27 +901,18 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n"); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret); buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write PMT_CTL: %d\n", ret); timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read PMT_CTL: %d\n", ret); timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); @@ -1057,19 +926,13 @@ static int smsc75xx_reset(struct usbnet *dev) smsc75xx_init_mac_address(dev); ret = smsc75xx_set_mac_address(dev); - if (ret < 0) { - netdev_warn(dev->net, "Failed to set mac address\n"); - return ret; - } + check_warn_return(ret, "Failed to set mac address\n"); netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n", dev->net->dev_addr); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n", buf); @@ -1077,16 +940,10 @@ static int smsc75xx_reset(struct usbnet *dev) buf |= HW_CFG_BIR; ret = smsc75xx_write_reg(dev, HW_CFG, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to 
read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n", buf); @@ -1106,57 +963,36 @@ static int smsc75xx_reset(struct usbnet *dev) (ulong)dev->rx_urb_size); ret = smsc75xx_write_reg(dev, BURST_CAP, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret); ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret); ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf); if (turbo_mode) { ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); buf |= (HW_CFG_MEF | HW_CFG_BCE); ret = smsc75xx_write_reg(dev, HW_CFG, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); } @@ -1164,92 +1000,58 @@ static int smsc75xx_reset(struct usbnet *dev) /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d\n", ret); netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf); buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d\n", ret); netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write INT_STS: %d\n", ret); ret = smsc75xx_read_reg(dev, ID_REV, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read ID_REV: %d\n", ret); netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret); - return ret; - } + 
check_warn_return(ret, "Failed to read E2P_CMD: %d\n", ret); /* only set default GPIO/LED settings if no EEPROM is detected */ if (!(buf & E2P_CMD_LOADED)) { ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d\n", + ret); buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", + ret); } ret = smsc75xx_write_reg(dev, FLOW, 0); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FLOW: %d\n", ret); ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FCT_FLOW: %d\n", ret); /* Don't need rfe_ctl_lock during initialisation */ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read RFE_CTL: %d\n", ret); pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write RFE_CTL: %d\n", ret); ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read RFE_CTL: %d\n", ret); netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n", pdata->rfe_ctl); @@ -1260,107 +1062,65 @@ static int smsc75xx_reset(struct usbnet *dev) smsc75xx_set_multicast(dev->net); ret = smsc75xx_phy_initialize(dev); - if (ret < 0) { - netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to initialize PHY: %d\n", ret); ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret); /* enable PHY interrupts */ buf |= INT_ENP_PHY_INT; ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret); /* allow mac to detect speed and duplex from phy */ ret = smsc75xx_read_reg(dev, MAC_CR, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret); buf |= (MAC_CR_ADD | MAC_CR_ASD); ret = smsc75xx_write_reg(dev, MAC_CR, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret); ret = smsc75xx_read_reg(dev, MAC_TX, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read MAC_TX: %d\n", ret); buf |= MAC_TX_TXEN; ret = smsc75xx_write_reg(dev, MAC_TX, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to 
write MAC_TX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_TX: %d\n", ret); netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read FCT_TX_CTL: %d\n", ret); buf |= FCT_TX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FCT_TX_CTL: %d\n", ret); netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); ret = smsc75xx_set_rx_max_frame_length(dev, 1514); - if (ret < 0) { - netdev_warn(dev->net, "Failed to set max rx frame length\n"); - return ret; - } + check_warn_return(ret, "Failed to set max rx frame length\n"); ret = smsc75xx_read_reg(dev, MAC_RX, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read MAC_RX: %d\n", ret); buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_RX: %d\n", ret); netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read FCT_RX_CTL: %d\n", ret); buf |= FCT_RX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FCT_RX_CTL: %d\n", ret); netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf); @@ -1389,10 +1149,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); - if (ret < 0) { - netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret); - return ret; - } + check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret); dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), GFP_KERNEL); @@ -1424,10 +1181,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc75xx_reset(dev); - if (ret < 0) { - netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); - return ret; - } + check_warn_return(ret, "smsc75xx_reset error %d\n", ret); dev->net->netdev_ops = &smsc75xx_netdev_ops; dev->net->ethtool_ops = &smsc75xx_ethtool_ops; @@ -1461,34 +1215,19 @@ static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg, int ret; ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUF_CFGX\n"); - return ret; - } + check_warn_return(ret, "Error writing WUF_CFGX\n"); ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUF_MASKX\n"); - return ret; - } + check_warn_return(ret, "Error writing WUF_MASKX\n"); ret = smsc75xx_write_reg(dev, mask_base + 4, 0); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUF_MASKX\n"); - return ret; - } + check_warn_return(ret, "Error writing WUF_MASKX\n"); ret = smsc75xx_write_reg(dev, mask_base + 8, 0); - if (ret < 0) { - netdev_warn(dev->net, "Error writing 
WUF_MASKX\n"); - return ret; - } + check_warn_return(ret, "Error writing WUF_MASKX\n"); ret = smsc75xx_write_reg(dev, mask_base + 12, 0); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUF_MASKX\n"); - return ret; - } + check_warn_return(ret, "Error writing WUF_MASKX\n"); return 0; } @@ -1500,19 +1239,13 @@ static int smsc75xx_enter_suspend0(struct usbnet *dev) int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST)); val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); pdata->suspend_flags |= SUSPEND_SUSPEND0; @@ -1526,29 +1259,20 @@ static int smsc75xx_enter_suspend1(struct usbnet *dev) int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_1; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); pdata->suspend_flags |= SUSPEND_SUSPEND1; @@ -1562,19 +1286,13 @@ static int smsc75xx_enter_suspend2(struct usbnet *dev) int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_2; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); pdata->suspend_flags |= SUSPEND_SUSPEND2; @@ -1588,10 +1306,7 @@ static int smsc75xx_enter_suspend3(struct usbnet *dev) int ret; ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading FCT_RX_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading FCT_RX_CTL\n"); if (val & FCT_RX_CTL_RXUSED) { netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n"); @@ -1599,29 +1314,20 @@ static int smsc75xx_enter_suspend3(struct usbnet *dev) } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); /* clear wol status */ val &= ~PMT_CTL_WUPS; val |= PMT_CTL_WUPS_WOL; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - 
return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); pdata->suspend_flags |= SUSPEND_SUSPEND3; @@ -1637,17 +1343,11 @@ static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) /* read to clear */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); - return ret; - } + check_warn_return(ret, "Error reading PHY_INT_SRC\n"); /* enable interrupt source */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); - return ret; - } + check_warn_return(ret, "Error reading PHY_INT_MASK\n"); ret |= mask; @@ -1663,16 +1363,10 @@ static int smsc75xx_link_ok_nopm(struct usbnet *dev) /* first, a dummy read, needed to latch some MII phys */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); - return ret; - } + check_warn_return(ret, "Error reading MII_BMSR\n"); ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); - return ret; - } + check_warn_return(ret, "Error reading MII_BMSR\n"); return !!(ret & BMSR_LSTATUS); } @@ -1694,10 +1388,7 @@ static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up) /* enable PHY wakeup events for if cable is attached */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_ANEG_COMP); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - return ret; - } + check_warn_return(ret, "error enabling PHY wakeup ints\n"); netdev_info(dev->net, "entering SUSPEND1 mode\n"); return smsc75xx_enter_suspend1(dev); @@ -1706,10 +1397,7 @@ static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up) /* enable PHY wakeup events so we remote wakeup if cable is pulled */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_LINK_DOWN); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - return ret; - } + check_warn_return(ret, "error enabling PHY wakeup ints\n"); netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); return smsc75xx_enter_suspend3(dev); @@ -1723,10 +1411,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) int ret; ret = usbnet_suspend(intf, message); - if (ret < 0) { - netdev_warn(dev->net, "usbnet_suspend error\n"); - return ret; - } + check_warn_goto_done(ret, "usbnet_suspend error\n"); if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); @@ -1751,32 +1436,20 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) /* disable energy detect (link up) & wake up events */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val &= ~(WUCSR_MPEN | WUCSR_WUEN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading PMT_CTL\n"); val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - goto done; - } + 
check_warn_goto_done(ret, "Error writing PMT_CTL\n"); ret = smsc75xx_enter_suspend2(dev); goto done; @@ -1785,10 +1458,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) if (pdata->wolopts & WAKE_PHY) { ret = smsc75xx_enable_phy_wakeup_interrupts(dev, (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN)); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - goto done; - } + check_warn_goto_done(ret, "error enabling PHY wakeup ints\n"); /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below @@ -1800,10 +1470,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) /* enable energy detect power-down mode */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading PHY_MODE_CTRL_STS\n"); ret |= MODE_CTRL_STS_EDPWRDOWN; @@ -1822,10 +1489,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) /* disable all filters */ for (i = 0; i < WUF_NUM; i++) { ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUF_CFGX\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUF_CFGX\n"); } if (pdata->wolopts & WAKE_MCAST) { @@ -1835,10 +1499,7 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST | smsc_crc(mcast, 3); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007); - if (ret < 0) { - netdev_warn(dev->net, "Error writing wakeup filter\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing wakeup filter\n"); } if (pdata->wolopts & WAKE_ARP) { @@ -1848,159 +1509,102 @@ static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16) | smsc_crc(arp, 2); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003); - if (ret < 0) { - netdev_warn(dev->net, "Error writing wakeup filter\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing wakeup filter\n"); } /* clear any pending pattern match packet status */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val |= WUCSR_WUFR; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); netdev_info(dev->net, "enabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val |= WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); } else { netdev_info(dev->net, "disabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val &= ~WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); } 
/* disable magic, bcast & unicast wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); if (pdata->wolopts & WAKE_PHY) { netdev_info(dev->net, "enabling PHY wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading PMT_CTL\n"); /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing PMT_CTL\n"); } if (pdata->wolopts & WAKE_MAGIC) { netdev_info(dev->net, "enabling magic packet wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); /* clear any pending magic packet status */ val |= WUCSR_MPR | WUCSR_MPEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); } if (pdata->wolopts & WAKE_BCAST) { netdev_info(dev->net, "enabling broadcast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val |= WUCSR_BCAST_FR | WUCSR_BCST_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); } if (pdata->wolopts & WAKE_UCAST) { netdev_info(dev->net, "enabling unicast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading WUCSR\n"); val |= WUCSR_WUFR | WUCSR_PFDA_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing WUCSR\n"); } /* enable receiver to enable frame reception */ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); - goto done; - } + check_warn_goto_done(ret, "Failed to read MAC_RX: %d\n", ret); val |= MAC_RX_RXEN; ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); - goto done; - } + check_warn_goto_done(ret, "Failed to write MAC_RX: %d\n", ret); /* some wol options are enabled, so enter SUSPEND0 */ netdev_info(dev->net, "entering SUSPEND0 mode\n"); @@ -2028,60 +1632,39 @@ static int smsc75xx_resume(struct usb_interface *intf) if (suspend_flags & SUSPEND_ALLMODES) { /* Disable wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - return ret; - } + check_warn_return(ret, "Error reading WUCSR\n"); val &= ~(WUCSR_WUEN | 
WUCSR_MPEN | WUCSR_PFDA_EN | WUCSR_BCST_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - return ret; - } + check_warn_return(ret, "Error writing WUCSR\n"); /* clear wake-up status */ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val &= ~PMT_CTL_WOL_EN; val |= PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); } if (suspend_flags & SUSPEND_SUSPEND2) { netdev_info(dev->net, "resuming from SUSPEND2\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error reading PMT_CTL\n"); val |= PMT_CTL_PHY_PWRUP; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PMT_CTL\n"); - return ret; - } + check_warn_return(ret, "Error writing PMT_CTL\n"); } ret = smsc75xx_wait_ready(dev, 1); - if (ret < 0) { - netdev_warn(dev->net, "device not ready in smsc75xx_resume\n"); - return ret; - } + check_warn_return(ret, "device not ready in smsc75xx_resume\n"); return usbnet_resume(intf); } diff --git a/trunk/drivers/net/usb/smsc95xx.c b/trunk/drivers/net/usb/smsc95xx.c index f7e1e189fa4a..79d495d15546 100644 --- a/trunk/drivers/net/usb/smsc95xx.c +++ b/trunk/drivers/net/usb/smsc95xx.c @@ -55,6 +55,15 @@ #define FEATURE_PHY_NLP_CROSSOVER (0x02) #define FEATURE_AUTOSUSPEND (0x04) +#define check_warn(ret, fmt, args...) \ + ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) + +#define check_warn_return(ret, fmt, args...) \ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) + +#define check_warn_goto_done(ret, fmt, args...) 
\ + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } }) + struct smsc95xx_priv { u32 mac_cr; u32 hash_hi; @@ -145,6 +154,25 @@ static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index, { return __smsc95xx_write_reg(dev, index, data, 0); } +static int smsc95xx_set_feature(struct usbnet *dev, u32 feature) +{ + if (WARN_ON_ONCE(!dev)) + return -EINVAL; + + return usbnet_write_cmd_nopm(dev, USB_REQ_SET_FEATURE, + USB_RECIP_DEVICE, feature, 0, + NULL, 0); +} + +static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature) +{ + if (WARN_ON_ONCE(!dev)) + return -EINVAL; + + return usbnet_write_cmd_nopm(dev, USB_REQ_CLEAR_FEATURE, + USB_RECIP_DEVICE, feature, + 0, NULL, 0); +} /* Loop until the read is completed with timeout * called with phy_mutex held */ @@ -157,11 +185,7 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_ACCESS\n"); - return ret; - } - + check_warn_return(ret, "Error reading MII_ACCESS\n"); if (!(val & MII_BUSY_)) return 0; } while (!time_after(jiffies, start_time + HZ)); @@ -180,32 +204,20 @@ static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx, /* confirm MII not busy */ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n"); - goto done; - } + check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read\n"); /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_ADDR\n"); ret = __smsc95xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); - goto done; - } + check_warn_goto_done(ret, "Timed out reading MII reg %02X\n", idx); ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); - goto done; - } + check_warn_goto_done(ret, "Error reading MII_DATA\n"); ret = (u16)(val & 0xFFFF); @@ -225,33 +237,21 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id, /* confirm MII not busy */ ret = __smsc95xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n"); - goto done; - } + check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write\n"); val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_DATA\n"); /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); - goto done; - } + check_warn_goto_done(ret, "Error writing MII_ADDR\n"); ret = __smsc95xx_phy_wait_not_busy(dev, in_pm); - if (ret < 0) { - netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); - goto done; - } + check_warn_goto_done(ret, "Timed out writing MII reg %02X\n", idx); done: mutex_unlock(&dev->phy_mutex); @@ -288,11 +288,7 @@ 
static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev) do { ret = smsc95xx_read_reg(dev, E2P_CMD, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading E2P_CMD\n"); - return ret; - } - + check_warn_return(ret, "Error reading E2P_CMD\n"); if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_)) break; udelay(40); @@ -314,10 +310,7 @@ static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev) do { ret = smsc95xx_read_reg(dev, E2P_CMD, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error reading E2P_CMD\n"); if (!(val & E2P_CMD_BUSY_)) return 0; @@ -345,20 +338,14 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, for (i = 0; i < length; i++) { val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_); ret = smsc95xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_CMD\n"); ret = smsc95xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc95xx_read_reg(dev, E2P_DATA, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading E2P_DATA\n"); - return ret; - } + check_warn_return(ret, "Error reading E2P_DATA\n"); data[i] = val & 0xFF; offset++; @@ -383,10 +370,7 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, /* Issue write/erase enable command */ val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_; ret = smsc95xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_DATA\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_DATA\n"); ret = smsc95xx_wait_eeprom(dev); if (ret < 0) @@ -397,18 +381,12 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, /* Fill data register */ val = data[i]; ret = smsc95xx_write_reg(dev, E2P_DATA, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_DATA\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_DATA\n"); /* Send "write" command */ val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_); ret = smsc95xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing E2P_CMD\n"); - return ret; - } + check_warn_return(ret, "Error writing E2P_CMD\n"); ret = smsc95xx_wait_eeprom(dev); if (ret < 0) @@ -491,16 +469,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev) /* Initiate async writes, as we can't wait for completion here */ ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); - if (ret < 0) - netdev_warn(dev->net, "failed to initiate async write to HASHH\n"); + check_warn(ret, "failed to initiate async write to HASHH\n"); ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); - if (ret < 0) - netdev_warn(dev->net, "failed to initiate async write to HASHL\n"); + check_warn(ret, "failed to initiate async write to HASHL\n"); ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); - if (ret < 0) - netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n"); + check_warn(ret, "failed to initiate async write to MAC_CR\n"); } static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, @@ -509,10 +484,7 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, u32 flow, afc_cfg = 0; int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); - if (ret < 0) { - netdev_warn(dev->net, "Error reading AFC_CFG\n"); - return ret; - } + check_warn_return(ret, "Error reading AFC_CFG\n"); if (duplex == 
DUPLEX_FULL) { u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); @@ -537,16 +509,12 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, } ret = smsc95xx_write_reg(dev, FLOW, flow); - if (ret < 0) { - netdev_warn(dev->net, "Error writing FLOW\n"); - return ret; - } + check_warn_return(ret, "Error writing FLOW\n"); ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); - if (ret < 0) - netdev_warn(dev->net, "Error writing AFC_CFG\n"); + check_warn_return(ret, "Error writing AFC_CFG\n"); - return ret; + return 0; } static int smsc95xx_link_reset(struct usbnet *dev) @@ -560,16 +528,10 @@ static int smsc95xx_link_reset(struct usbnet *dev) /* clear interrupt status */ ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); - return ret; - } + check_warn_return(ret, "Error reading PHY_INT_SRC\n"); ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) { - netdev_warn(dev->net, "Error writing INT_STS\n"); - return ret; - } + check_warn_return(ret, "Error writing INT_STS\n"); mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); @@ -591,16 +553,12 @@ static int smsc95xx_link_reset(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Error writing MAC_CR\n"); - return ret; - } + check_warn_return(ret, "Error writing MAC_CR\n"); ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); - if (ret < 0) - netdev_warn(dev->net, "Error updating PHY flow control\n"); + check_warn_return(ret, "Error updating PHY flow control\n"); - return ret; + return 0; } static void smsc95xx_status(struct usbnet *dev, struct urb *urb) @@ -634,10 +592,7 @@ static int smsc95xx_set_features(struct net_device *netdev, int ret; ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read COE_CR: %d\n", ret); if (features & NETIF_F_HW_CSUM) read_buf |= Tx_COE_EN_; @@ -650,10 +605,7 @@ static int smsc95xx_set_features(struct net_device *netdev, read_buf &= ~Rx_COE_EN_; ret = smsc95xx_write_reg(dev, COE_CR, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write COE_CR: %d\n", ret); netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf); return 0; @@ -733,15 +685,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net, { struct usbnet *dev = netdev_priv(net); struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); - int ret; pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; - - ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); - if (ret < 0) - netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret); - - return ret; + return 0; } static const struct ethtool_ops smsc95xx_ethtool_ops = { @@ -796,16 +742,12 @@ static int smsc95xx_set_mac_address(struct usbnet *dev) int ret; ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write ADDRL: %d\n", ret); ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); - if (ret < 0) - netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret); + check_warn_return(ret, "Failed to write ADDRH: %d\n", ret); - return ret; + return 0; } /* starts the TX path */ @@ 
-821,17 +763,13 @@ static int smsc95xx_start_tx_path(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret); /* Enable Tx at SCSRs */ ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_); - if (ret < 0) - netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret); + check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret); - return ret; + return 0; } /* Starts the Receive path */ @@ -846,10 +784,9 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm); - if (ret < 0) - netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); + check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret); - return ret; + return 0; } static int smsc95xx_phy_initialize(struct usbnet *dev) @@ -884,10 +821,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev) /* read to clear */ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n"); - return ret; - } + check_warn_return(ret, "Failed to read PHY_INT_SRC during init\n"); smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT_); @@ -906,19 +840,13 @@ static int smsc95xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n"); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n"); timeout = 0; do { msleep(10); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); timeout++; } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); @@ -928,19 +856,13 @@ static int smsc95xx_reset(struct usbnet *dev) } ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret); timeout = 0; do { msleep(10); ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret); timeout++; } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); @@ -957,10 +879,7 @@ static int smsc95xx_reset(struct usbnet *dev) dev->net->dev_addr); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n", read_buf); @@ -968,17 +887,10 @@ static int smsc95xx_reset(struct usbnet *dev) read_buf |= HW_CFG_BIR_; ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n"); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n"); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - 
return ret; - } - + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n", read_buf); @@ -998,42 +910,27 @@ static int smsc95xx_reset(struct usbnet *dev) (ulong)dev->rx_urb_size); ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret); ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", read_buf); ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret); ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", read_buf); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n", read_buf); @@ -1047,111 +944,66 @@ static int smsc95xx_reset(struct usbnet *dev) read_buf |= NET_IP_ALIGN << 9; ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret); ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing: 0x%08x\n", read_buf); ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write INT_STS: %d\n", ret); ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read ID_REV: %d\n", ret); netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); /* Configure GPIO pins as LED outputs */ write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | LED_GPIO_CFG_FDX_LED; ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret); /* Init Tx */ ret = smsc95xx_write_reg(dev, FLOW, 0); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write FLOW: %d\n", ret); ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret); /* Don't need mac_cr_lock 
during initialisation */ ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret); /* Init Rx */ /* Set Vlan */ ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write VLAN1: %d\n", ret); /* Enable or disable checksum offload engines */ ret = smsc95xx_set_features(dev->net, dev->net->features); - if (ret < 0) { - netdev_warn(dev->net, "Failed to set checksum offload features\n"); - return ret; - } + check_warn_return(ret, "Failed to set checksum offload features\n"); smsc95xx_set_multicast(dev->net); ret = smsc95xx_phy_initialize(dev); - if (ret < 0) { - netdev_warn(dev->net, "Failed to init PHY\n"); - return ret; - } + check_warn_return(ret, "Failed to init PHY\n"); ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret); /* enable PHY interrupts */ read_buf |= INT_EP_CTL_PHY_INT_; ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); - if (ret < 0) { - netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret); ret = smsc95xx_start_tx_path(dev); - if (ret < 0) { - netdev_warn(dev->net, "Failed to start TX path\n"); - return ret; - } + check_warn_return(ret, "Failed to start TX path\n"); ret = smsc95xx_start_rx_path(dev, 0); - if (ret < 0) { - netdev_warn(dev->net, "Failed to start RX path\n"); - return ret; - } + check_warn_return(ret, "Failed to start RX path\n"); netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n"); return 0; @@ -1179,10 +1031,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); - if (ret < 0) { - netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret); - return ret; - } + check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret); dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv), GFP_KERNEL); @@ -1209,10 +1058,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); - if (ret < 0) { - netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); - return ret; - } + check_warn_return(ret, "Failed to read ID_REV: %d\n", ret); val >>= 16; if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) || @@ -1242,10 +1088,9 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) } } -static u32 smsc_crc(const u8 *buffer, size_t len, int filter) +static u16 smsc_crc(const u8 *buffer, size_t len, int filter) { - u32 crc = bitrev16(crc16(0xFFFF, buffer, len)); - return crc << ((filter % 2) * 16); + return bitrev16(crc16(0xFFFF, buffer, len)) << ((filter % 2) * 16); } static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) @@ -1257,17 +1102,11 @@ static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) /* read to clear */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); - return ret; - } + 
check_warn_return(ret, "Error reading PHY_INT_SRC\n"); /* enable interrupt source */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); - return ret; - } + check_warn_return(ret, "Error reading PHY_INT_MASK\n"); ret |= mask; @@ -1283,16 +1122,10 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev) /* first, a dummy read, needed to latch some MII phys */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); - return ret; - } + check_warn_return(ret, "Error reading MII_BMSR\n"); ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); - if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_BMSR\n"); - return ret; - } + check_warn_return(ret, "Error reading MII_BMSR\n"); return !!(ret & BMSR_LSTATUS); } @@ -1304,19 +1137,13 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev) int ret; ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_)); val |= PM_CTL_SUS_MODE_0; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); /* clear wol status */ val &= ~PM_CTL_WUPS_; @@ -1327,17 +1154,15 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev) val |= PM_CTL_WUPS_ED_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); /* read back PM_CTRL */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) - netdev_warn(dev->net, "Error reading PM_CTRL\n"); + check_warn_return(ret, "Error reading PM_CTRL\n"); - return ret; + smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP); + + return 0; } static int smsc95xx_enter_suspend1(struct usbnet *dev) @@ -1356,10 +1181,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev) /* enable energy detect power-down mode */ ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); - return ret; - } + check_warn_return(ret, "Error reading PHY_MODE_CTRL_STS\n"); ret |= MODE_CTRL_STS_EDPWRDOWN_; @@ -1367,29 +1189,24 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev) /* enter SUSPEND1 mode */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); val |= PM_CTL_SUS_MODE_1; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); /* clear wol status, enable energy detection */ val &= ~PM_CTL_WUPS_; val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_); ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + check_warn_return(ret, "Error writing PM_CTRL\n"); - return ret; + smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP); + + return 0; } static int smsc95xx_enter_suspend2(struct usbnet *dev) @@ -1398,19 +1215,15 @@ static int 
smsc95xx_enter_suspend2(struct usbnet *dev) int ret; ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); val |= PM_CTL_SUS_MODE_2; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) - netdev_warn(dev->net, "Error writing PM_CTRL\n"); + check_warn_return(ret, "Error writing PM_CTRL\n"); - return ret; + return 0; } static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) @@ -1421,10 +1234,7 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) int ret; ret = usbnet_suspend(intf, message); - if (ret < 0) { - netdev_warn(dev->net, "usbnet_suspend error\n"); - return ret; - } + check_warn_return(ret, "usbnet_suspend error\n"); /* determine if link is up using only _nopm functions */ link_up = smsc95xx_link_ok_nopm(dev); @@ -1438,57 +1248,40 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) /* disable energy detect (link up) & wake up events */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error reading WUCSR\n"); val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error writing WUCSR\n"); ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - goto done; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_); ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - goto done; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); - ret = smsc95xx_enter_suspend2(dev); - goto done; + return smsc95xx_enter_suspend2(dev); } if (pdata->wolopts & WAKE_PHY) { ret = smsc95xx_enable_phy_wakeup_interrupts(dev, (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_)); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - goto done; - } + check_warn_return(ret, "error enabling PHY wakeup ints\n"); /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below */ if (!link_up) { netdev_info(dev->net, "entering SUSPEND1 mode\n"); - ret = smsc95xx_enter_suspend1(dev); - goto done; + return smsc95xx_enter_suspend1(dev); } } if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) { - u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL); + u32 *filter_mask = kzalloc(32, GFP_KERNEL); u32 command[2]; u32 offset[2]; u32 crc[4]; @@ -1497,12 +1290,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) LAN9500A_WUFF_NUM : LAN9500_WUFF_NUM; int i, filter = 0; - if (!filter_mask) { - netdev_warn(dev->net, "Unable to allocate filter_mask\n"); - ret = -ENOMEM; - goto done; - } - memset(command, 0, sizeof(command)); memset(offset, 0, sizeof(offset)); memset(crc, 0, sizeof(crc)); @@ -1560,77 +1347,51 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) for (i = 0; i < (wuff_filter_count * 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); + if (ret < 0) kfree(filter_mask); - goto done; - } + 
check_warn_return(ret, "Error writing WUFF\n"); } kfree(filter_mask); for (i = 0; i < (wuff_filter_count / 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); - goto done; - } + check_warn_return(ret, "Error writing WUFF\n"); } for (i = 0; i < (wuff_filter_count / 4); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); - goto done; - } + check_warn_return(ret, "Error writing WUFF\n"); } for (i = 0; i < (wuff_filter_count / 2); i++) { ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUFF\n"); - goto done; - } + check_warn_return(ret, "Error writing WUFF\n"); } /* clear any pending pattern match packet status */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error reading WUCSR\n"); val |= WUCSR_WUFR_; ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error writing WUCSR\n"); } if (pdata->wolopts & WAKE_MAGIC) { /* clear any pending magic packet status */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error reading WUCSR\n"); val |= WUCSR_MPR_; ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error writing WUCSR\n"); } /* enable/disable wakeup sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error reading WUCSR\n"); if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) { netdev_info(dev->net, "enabling pattern match wakeup\n"); @@ -1649,17 +1410,11 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) } ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - goto done; - } + check_warn_return(ret, "Error writing WUCSR\n"); /* enable wol wakeup source */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - goto done; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val |= PM_CTL_WOL_EN_; @@ -1668,22 +1423,14 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) val |= PM_CTL_ED_EN_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - goto done; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); /* enable receiver to enable frame reception */ smsc95xx_start_rx_path(dev, 1); /* some wol options are enabled, so enter SUSPEND0 */ netdev_info(dev->net, "entering SUSPEND0 mode\n"); - ret = smsc95xx_enter_suspend0(dev); - -done: - if (ret) - usbnet_resume(intf); - return ret; + return smsc95xx_enter_suspend0(dev); } static int smsc95xx_resume(struct usb_interface *intf) @@ -1696,43 +1443,32 @@ static int smsc95xx_resume(struct usb_interface *intf) BUG_ON(!dev); if (pdata->wolopts) { + smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP); + /* clear wake-up sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error 
reading WUCSR\n"); - return ret; - } + check_warn_return(ret, "Error reading WUCSR\n"); val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing WUCSR\n"); - return ret; - } + check_warn_return(ret, "Error writing WUCSR\n"); /* clear wake-up status */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); - if (ret < 0) { - netdev_warn(dev->net, "Error reading PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error reading PM_CTRL\n"); val &= ~PM_CTL_WOL_EN_; val |= PM_CTL_WUPS_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); - if (ret < 0) { - netdev_warn(dev->net, "Error writing PM_CTRL\n"); - return ret; - } + check_warn_return(ret, "Error writing PM_CTRL\n"); } ret = usbnet_resume(intf); - if (ret < 0) - netdev_warn(dev->net, "usbnet_resume error\n"); + check_warn_return(ret, "usbnet_resume error\n"); - return ret; + return 0; } static void smsc95xx_rx_csum_offload(struct sk_buff *skb) diff --git a/trunk/drivers/net/veth.c b/trunk/drivers/net/veth.c index 95814d9747ef..24f6b27e169f 100644 --- a/trunk/drivers/net/veth.c +++ b/trunk/drivers/net/veth.c @@ -340,7 +340,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (IS_ERR(net)) return PTR_ERR(net); - peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp); + peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); diff --git a/trunk/include/linux/ipv6.h b/trunk/include/linux/ipv6.h index 12729e966dc9..5e11905a4f01 100644 --- a/trunk/include/linux/ipv6.h +++ b/trunk/include/linux/ipv6.h @@ -364,22 +364,20 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - ((inet_sk(__sk)->inet_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ - ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) - -#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - ((inet_twsk(__sk)->tw_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) +#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ + ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \ + ((__sk)->sk_family == AF_INET6) && \ + ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ + ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) + +#define INET6_TW_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ + (*((__portpair *)&(inet_twsk(__sk)->tw_dport)) == (__ports)) && \ + ((__sk)->sk_family == PF_INET6) && \ + (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))) && \ + (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr))) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) #endif /* _IPV6_H */ 
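The INET6_MATCH/INET6_TW_MATCH hunks above (and the INET_MATCH/INET_TW_MATCH ones in inet_hashtables.h that follow) both lean on the same layout trick: the 16-bit destination port and the 16-bit local port number sit next to each other in sock_common, so the demux test can compare them with one 32-bit load instead of two 16-bit ones — either through a named union member (inet_portpair/tw_portpair) or through a __portpair cast on &inet_dport/&tw_dport. A minimal standalone sketch of that idea, with invented field names rather than the kernel's structs:

/* portpair_demo.c -- illustrative only; the field names are made up and do
 * not match the kernel's struct sock_common / struct inet_sock layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demux_key {
	union {
		uint32_t portpair;        /* compared in one 32-bit load */
		struct {
			uint16_t dport;   /* peer port                   */
			uint16_t num;     /* local port                  */
		};
	};
};

/* the per-field form: two 16-bit compares */
static int match_fields(const struct demux_key *k, uint16_t dport, uint16_t num)
{
	return k->dport == dport && k->num == num;
}

/* the portpair form: one 32-bit compare against a pre-built key */
static int match_pair(const struct demux_key *k, uint32_t ports)
{
	return k->portpair == ports;
}

int main(void)
{
	struct demux_key k = { .dport = 443, .num = 40000 };
	uint32_t ports;

	/* build the combined key with the same layout the union uses,
	 * so the comparison stays endian-safe for this sketch */
	memcpy(&ports, &k, sizeof(ports));

	printf("field compare: %d, pair compare: %d\n",
	       match_fields(&k, 443, 40000), match_pair(&k, ports));
	return 0;
}

The 64-bit __addrpair comparison in the inet_hashtables.h hunks is the same idea applied to the two IPv4 addresses on 64-bit arches.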
diff --git a/trunk/include/linux/netfilter_ipv6/ip6_tables.h b/trunk/include/linux/netfilter_ipv6/ip6_tables.h index 610208b18c05..5f84c6229dc6 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6_tables.h +++ b/trunk/include/linux/netfilter_ipv6/ip6_tables.h @@ -47,6 +47,15 @@ ip6t_ext_hdr(u8 nexthdr) (nexthdr == IPPROTO_DSTOPTS); } +enum { + IP6T_FH_F_FRAG = (1 << 0), + IP6T_FH_F_AUTH = (1 << 1), +}; + +/* find specified header and get offset to it */ +extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *fragflg); + #ifdef CONFIG_COMPAT #include diff --git a/trunk/include/linux/openvswitch.h b/trunk/include/linux/openvswitch.h index d42e174bd0c8..eb1efa54fe84 100644 --- a/trunk/include/linux/openvswitch.h +++ b/trunk/include/linux/openvswitch.h @@ -243,7 +243,6 @@ enum ovs_key_attr { OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ - OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */ __OVS_KEY_ATTR_MAX }; diff --git a/trunk/include/net/inet_hashtables.h b/trunk/include/net/inet_hashtables.h index d1de4fbd45c2..54be0287eb98 100644 --- a/trunk/include/net/inet_hashtables.h +++ b/trunk/include/net/inet_hashtables.h @@ -299,34 +299,30 @@ typedef __u64 __bitwise __addrpair; (((__force __u64)(__be32)(__daddr)) << 32) | \ ((__force __u64)(__be32)(__saddr))); #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_sk(__sk)->inet_portpair == (__ports)) && \ - (inet_sk(__sk)->inet_addrpair == (__cookie)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) -#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\ - ((inet_twsk(__sk)->tw_portpair == (__ports)) && \ - (inet_twsk(__sk)->tw_addrpair == (__cookie)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) +#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \ + ((*((__addrpair *)&(inet_sk(__sk)->inet_daddr))) == (__cookie)) && \ + ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) +#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \ + ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ + ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) #else /* 32-bit arch */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_sk(__sk)->inet_portpair == (__ports)) && \ - (inet_sk(__sk)->inet_daddr == (__saddr)) && \ - (inet_sk(__sk)->inet_rcv_saddr == (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) -#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_twsk(__sk)->tw_portpair == (__ports)) && \ - (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ - (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) +#define 
INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \ + (inet_sk(__sk)->inet_daddr == (__saddr)) && \ + (inet_sk(__sk)->inet_rcv_saddr == (__daddr)) && \ + ((*((__portpair *)&(inet_sk(__sk)->inet_dport))) == (__ports)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) +#define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) && \ + (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ + (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ + ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ + (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) #endif /* 64-bit arch */ /* diff --git a/trunk/include/net/inet_sock.h b/trunk/include/net/inet_sock.h index a4196cbc84ec..256c1ed2d69a 100644 --- a/trunk/include/net/inet_sock.h +++ b/trunk/include/net/inet_sock.h @@ -144,11 +144,9 @@ struct inet_sock { /* Socket demultiplex comparisons on incoming packets. */ #define inet_daddr sk.__sk_common.skc_daddr #define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr -#define inet_addrpair sk.__sk_common.skc_addrpair -#define inet_dport sk.__sk_common.skc_dport -#define inet_num sk.__sk_common.skc_num -#define inet_portpair sk.__sk_common.skc_portpair + __be16 inet_dport; + __u16 inet_num; __be32 inet_saddr; __s16 uc_ttl; __u16 cmsg_flags; @@ -156,7 +154,6 @@ struct inet_sock { __u16 inet_id; struct ip_options_rcu __rcu *inet_opt; - int rx_dst_ifindex; __u8 tos; __u8 min_ttl; __u8 mc_ttl; @@ -173,6 +170,7 @@ struct inet_sock { int uc_index; int mc_index; __be32 mc_addr; + int rx_dst_ifindex; struct ip_mc_socklist __rcu *mc_list; struct inet_cork_full cork; }; diff --git a/trunk/include/net/inet_timewait_sock.h b/trunk/include/net/inet_timewait_sock.h index 7d658d577368..ba52c830a7a5 100644 --- a/trunk/include/net/inet_timewait_sock.h +++ b/trunk/include/net/inet_timewait_sock.h @@ -112,11 +112,6 @@ struct inet_timewait_sock { #define tw_net __tw_common.skc_net #define tw_daddr __tw_common.skc_daddr #define tw_rcv_saddr __tw_common.skc_rcv_saddr -#define tw_addrpair __tw_common.skc_addrpair -#define tw_dport __tw_common.skc_dport -#define tw_num __tw_common.skc_num -#define tw_portpair __tw_common.skc_portpair - int tw_timeout; volatile unsigned char tw_substate; unsigned char tw_rcv_wscale; @@ -124,6 +119,8 @@ struct inet_timewait_sock { /* Socket demultiplex comparisons on incoming packets. */ /* these three are in inet_sock */ __be16 tw_sport; + __be16 tw_dport; + __u16 tw_num; kmemcheck_bitfield_begin(flags); /* And these are ours. 
*/ unsigned int tw_ipv6only : 1, diff --git a/trunk/include/net/ipv6.h b/trunk/include/net/ipv6.h index acbd8e034310..979bf6c13141 100644 --- a/trunk/include/net/ipv6.h +++ b/trunk/include/net/ipv6.h @@ -630,16 +630,6 @@ extern int ipv6_skip_exthdr(const struct sk_buff *, int start, extern bool ipv6_ext_hdr(u8 nexthdr); -enum { - IP6_FH_F_FRAG = (1 << 0), - IP6_FH_F_AUTH = (1 << 1), - IP6_FH_F_SKIP_RH = (1 << 2), -}; - -/* find specified header and get offset to it */ -extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff, int *fragflg); - extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6, diff --git a/trunk/include/net/rtnetlink.h b/trunk/include/net/rtnetlink.h index 5a15fabd6a75..6b00c4fc4291 100644 --- a/trunk/include/net/rtnetlink.h +++ b/trunk/include/net/rtnetlink.h @@ -125,7 +125,7 @@ extern void rtnl_af_unregister(struct rtnl_af_ops *ops); extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); -extern struct net_device *rtnl_create_link(struct net *net, +extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net, char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]); extern int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index c4132c1b63a8..c945fba4f543 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -132,8 +132,6 @@ struct net; * @skc_rcv_saddr: Bound local IPv4 addr * @skc_hash: hash value used with various protocol lookup tables * @skc_u16hashes: two u16 hash values used by UDP lookup tables - * @skc_dport: placeholder for inet_dport/tw_dport - * @skc_num: placeholder for inet_num/tw_num * @skc_family: network address family * @skc_state: Connection state * @skc_reuse: %SO_REUSEADDR setting @@ -151,29 +149,16 @@ struct net; * for struct sock and struct inet_timewait_sock. */ struct sock_common { - /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned - * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH() + /* skc_daddr and skc_rcv_saddr must be grouped : + * cf INET_MATCH() and INET_TW_MATCH() */ - union { - unsigned long skc_addrpair; - struct { - __be32 skc_daddr; - __be32 skc_rcv_saddr; - }; - }; + __be32 skc_daddr; + __be32 skc_rcv_saddr; + union { unsigned int skc_hash; __u16 skc_u16hashes[2]; }; - /* skc_dport && skc_num must be grouped as well */ - union { - u32 skc_portpair; - struct { - __be16 skc_dport; - __u16 skc_num; - }; - }; - unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse; diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index a292e8050ef2..afba51e60310 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -242,7 +242,6 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) * hope the underlying device can handle it. 
*/ new_dev->mtu = real_dev->mtu; - new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); vlan_dev_priv(new_dev)->vlan_id = vlan_id; vlan_dev_priv(new_dev)->real_dev = real_dev; diff --git a/trunk/net/batman-adv/hard-interface.c b/trunk/net/batman-adv/hard-interface.c index f1d37cd81815..365ed74f3946 100644 --- a/trunk/net/batman-adv/hard-interface.c +++ b/trunk/net/batman-adv/hard-interface.c @@ -30,7 +30,6 @@ #include "bridge_loop_avoidance.h" #include -#include void batadv_hardif_free_rcu(struct rcu_head *rcu) { @@ -312,7 +311,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, { struct batadv_priv *bat_priv; struct net_device *soft_iface; - __be16 ethertype = __constant_htons(ETH_P_BATMAN); + __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); int ret; if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) diff --git a/trunk/net/batman-adv/packet.h b/trunk/net/batman-adv/packet.h index cb6405bf755c..1c5454d33f67 100644 --- a/trunk/net/batman-adv/packet.h +++ b/trunk/net/batman-adv/packet.h @@ -20,6 +20,8 @@ #ifndef _NET_BATMAN_ADV_PACKET_H_ #define _NET_BATMAN_ADV_PACKET_H_ +#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ + enum batadv_packettype { BATADV_IV_OGM = 0x01, BATADV_ICMP = 0x02, diff --git a/trunk/net/batman-adv/send.c b/trunk/net/batman-adv/send.c index 4425af9dad40..c7f702376535 100644 --- a/trunk/net/batman-adv/send.c +++ b/trunk/net/batman-adv/send.c @@ -28,8 +28,6 @@ #include "gateway_common.h" #include "originator.h" -#include - static void batadv_send_outstanding_bcast_packet(struct work_struct *work); /* send out an already prepared packet to the given address via the @@ -62,11 +60,11 @@ int batadv_send_skb_packet(struct sk_buff *skb, ethhdr = (struct ethhdr *)skb_mac_header(skb); memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); - ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); + ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN); skb_set_network_header(skb, ETH_HLEN); skb->priority = TC_PRIO_CONTROL; - skb->protocol = __constant_htons(ETH_P_BATMAN); + skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN); skb->dev = hard_iface->net_dev; diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index 6b548fde8e04..54800c783f96 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -34,7 +34,6 @@ #include #include #include -#include #include "unicast.h" #include "bridge_loop_avoidance.h" @@ -147,7 +146,7 @@ static int batadv_interface_tx(struct sk_buff *skb, struct batadv_hard_iface *primary_if = NULL; struct batadv_bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; - __be16 ethertype = __constant_htons(ETH_P_BATMAN); + __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}; static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, @@ -173,7 +172,7 @@ static int batadv_interface_tx(struct sk_buff *skb, break; /* fall through */ - case ETH_P_BATMAN: + case BATADV_ETH_P_BATMAN: goto dropped; } @@ -303,7 +302,7 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct batadv_header *batadv_header = (struct batadv_header *)skb->data; short vid __maybe_unused = -1; - __be16 ethertype = __constant_htons(ETH_P_BATMAN); + __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); bool is_bcast; is_bcast = (batadv_header->packet_type == 
BATADV_BCAST); @@ -326,7 +325,7 @@ void batadv_interface_rx(struct net_device *soft_iface, break; /* fall through */ - case ETH_P_BATMAN: + case BATADV_ETH_P_BATMAN: goto dropped; } diff --git a/trunk/net/core/rtnetlink.c b/trunk/net/core/rtnetlink.c index 1868625af25e..575a6ee89944 100644 --- a/trunk/net/core/rtnetlink.c +++ b/trunk/net/core/rtnetlink.c @@ -1642,7 +1642,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) } EXPORT_SYMBOL(rtnl_configure_link); -struct net_device *rtnl_create_link(struct net *net, +struct net_device *rtnl_create_link(struct net *src_net, struct net *net, char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) { int err; @@ -1840,7 +1840,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (IS_ERR(dest_net)) return PTR_ERR(dest_net); - dev = rtnl_create_link(dest_net, ifname, ops, tb); + dev = rtnl_create_link(net, dest_net, ifname, ops, tb); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out; diff --git a/trunk/net/ieee802154/6lowpan.c b/trunk/net/ieee802154/6lowpan.c index f651da60f161..6d42c17af96b 100644 --- a/trunk/net/ieee802154/6lowpan.c +++ b/trunk/net/ieee802154/6lowpan.c @@ -1047,8 +1047,7 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) goto error; } - /* Send directly if less than the MTU minus the 2 checksum bytes. */ - if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) { + if (skb->len <= IEEE802154_MTU) { err = dev_queue_xmit(skb); goto out; } diff --git a/trunk/net/ipv4/inet_hashtables.c b/trunk/net/ipv4/inet_hashtables.c index fa3ae8148710..7880af970208 100644 --- a/trunk/net/ipv4/inet_hashtables.c +++ b/trunk/net/ipv4/inet_hashtables.c @@ -237,14 +237,12 @@ struct sock *__inet_lookup_established(struct net *net, rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { - if (sk->sk_hash != hash) - continue; - if (likely(INET_MATCH(sk, net, acookie, - saddr, daddr, ports, dif))) { + if (INET_MATCH(sk, net, hash, acookie, + saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) goto begintw; - if (unlikely(!INET_MATCH(sk, net, acookie, - saddr, daddr, ports, dif))) { + if (unlikely(!INET_MATCH(sk, net, hash, acookie, + saddr, daddr, ports, dif))) { sock_put(sk); goto begin; } @@ -262,18 +260,14 @@ struct sock *__inet_lookup_established(struct net *net, begintw: /* Must check for a TIME_WAIT'er before going to listener hash. */ sk_nulls_for_each_rcu(sk, node, &head->twchain) { - if (sk->sk_hash != hash) - continue; - if (likely(INET_TW_MATCH(sk, net, acookie, - saddr, daddr, ports, - dif))) { + if (INET_TW_MATCH(sk, net, hash, acookie, + saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) { sk = NULL; goto out; } - if (unlikely(!INET_TW_MATCH(sk, net, acookie, - saddr, daddr, ports, - dif))) { + if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie, + saddr, daddr, ports, dif))) { sock_put(sk); goto begintw; } @@ -320,12 +314,10 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, /* Check TIME-WAIT sockets first. 
*/ sk_nulls_for_each(sk2, node, &head->twchain) { - if (sk2->sk_hash != hash) - continue; + tw = inet_twsk(sk2); - if (likely(INET_TW_MATCH(sk2, net, acookie, - saddr, daddr, ports, dif))) { - tw = inet_twsk(sk2); + if (INET_TW_MATCH(sk2, net, hash, acookie, + saddr, daddr, ports, dif)) { if (twsk_unique(sk, sk2, twp)) goto unique; else @@ -336,10 +328,8 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, /* And established part... */ sk_nulls_for_each(sk2, node, &head->chain) { - if (sk2->sk_hash != hash) - continue; - if (likely(INET_MATCH(sk2, net, acookie, - saddr, daddr, ports, dif))) + if (INET_MATCH(sk2, net, hash, acookie, + saddr, daddr, ports, dif)) goto not_unique; } diff --git a/trunk/net/ipv6/exthdrs_core.c b/trunk/net/ipv6/exthdrs_core.c index c5e83fae4df4..e7d756e19d1d 100644 --- a/trunk/net/ipv6/exthdrs_core.c +++ b/trunk/net/ipv6/exthdrs_core.c @@ -155,127 +155,3 @@ int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) return -1; } EXPORT_SYMBOL_GPL(ipv6_find_tlv); - -/* - * find the offset to specified header or the protocol number of last header - * if target < 0. "last header" is transport protocol header, ESP, or - * "No next header". - * - * Note that *offset is used as input/output parameter. an if it is not zero, - * then it must be a valid offset to an inner IPv6 header. This can be used - * to explore inner IPv6 header, eg. ICMPv6 error messages. - * - * If target header is found, its offset is set in *offset and return protocol - * number. Otherwise, return -1. - * - * If the first fragment doesn't contain the final protocol header or - * NEXTHDR_NONE it is considered invalid. - * - * Note that non-1st fragment is special case that "the protocol number - * of last header" is "next header" field in Fragment header. In this case, - * *offset is meaningless and fragment offset is stored in *fragoff if fragoff - * isn't NULL. - * - * if flags is not NULL and it's a fragment, then the frag flag - * IP6_FH_F_FRAG will be set. If it's an AH header, the - * IP6_FH_F_AUTH flag is set and target < 0, then this function will - * stop at the AH header. If IP6_FH_F_SKIP_RH flag was passed, then this - * function will skip all those routing headers, where segements_left was 0. 
- */ -int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, - int target, unsigned short *fragoff, int *flags) -{ - unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); - u8 nexthdr = ipv6_hdr(skb)->nexthdr; - unsigned int len; - bool found; - - if (fragoff) - *fragoff = 0; - - if (*offset) { - struct ipv6hdr _ip6, *ip6; - - ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); - if (!ip6 || (ip6->version != 6)) { - printk(KERN_ERR "IPv6 header not found\n"); - return -EBADMSG; - } - start = *offset + sizeof(struct ipv6hdr); - nexthdr = ip6->nexthdr; - } - len = skb->len - start; - - do { - struct ipv6_opt_hdr _hdr, *hp; - unsigned int hdrlen; - found = (nexthdr == target); - - if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { - if (target < 0) - break; - return -ENOENT; - } - - hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); - if (hp == NULL) - return -EBADMSG; - - if (nexthdr == NEXTHDR_ROUTING) { - struct ipv6_rt_hdr _rh, *rh; - - rh = skb_header_pointer(skb, start, sizeof(_rh), - &_rh); - if (rh == NULL) - return -EBADMSG; - - if (flags && (*flags & IP6_FH_F_SKIP_RH) && - rh->segments_left == 0) - found = false; - } - - if (nexthdr == NEXTHDR_FRAGMENT) { - unsigned short _frag_off; - __be16 *fp; - - if (flags) /* Indicate that this is a fragment */ - *flags |= IP6_FH_F_FRAG; - fp = skb_header_pointer(skb, - start+offsetof(struct frag_hdr, - frag_off), - sizeof(_frag_off), - &_frag_off); - if (fp == NULL) - return -EBADMSG; - - _frag_off = ntohs(*fp) & ~0x7; - if (_frag_off) { - if (target < 0 && - ((!ipv6_ext_hdr(hp->nexthdr)) || - hp->nexthdr == NEXTHDR_NONE)) { - if (fragoff) - *fragoff = _frag_off; - return hp->nexthdr; - } - return -ENOENT; - } - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) { - if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0)) - break; - hdrlen = (hp->hdrlen + 2) << 2; - } else - hdrlen = ipv6_optlen(hp); - - if (!found) { - nexthdr = hp->nexthdr; - len -= hdrlen; - start += hdrlen; - } - } while (!found); - - *offset = start; - return nexthdr; -} -EXPORT_SYMBOL(ipv6_find_hdr); - diff --git a/trunk/net/ipv6/inet6_hashtables.c b/trunk/net/ipv6/inet6_hashtables.c index dea17fd28e50..73f1a00a96af 100644 --- a/trunk/net/ipv6/inet6_hashtables.c +++ b/trunk/net/ipv6/inet6_hashtables.c @@ -87,13 +87,11 @@ struct sock *__inet6_lookup_established(struct net *net, rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { - if (sk->sk_hash != hash) - continue; - if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) { + /* For IPV6 do the cheaper port and family tests first. */ + if (INET6_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) goto begintw; - if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, - ports, dif))) { + if (!INET6_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { sock_put(sk); goto begin; } @@ -106,16 +104,12 @@ struct sock *__inet6_lookup_established(struct net *net, begintw: /* Must check for a TIME_WAIT'er before going to listener hash. 
*/ sk_nulls_for_each_rcu(sk, node, &head->twchain) { - if (sk->sk_hash != hash) - continue; - if (likely(INET6_TW_MATCH(sk, net, saddr, daddr, - ports, dif))) { + if (INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) { sk = NULL; goto out; } - if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, - ports, dif))) { + if (!INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { sock_put(sk); goto begintw; } @@ -242,12 +236,9 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, /* Check TIME-WAIT sockets first. */ sk_nulls_for_each(sk2, node, &head->twchain) { - if (sk2->sk_hash != hash) - continue; + tw = inet_twsk(sk2); - if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr, - ports, dif))) { - tw = inet_twsk(sk2); + if (INET6_TW_MATCH(sk2, net, hash, saddr, daddr, ports, dif)) { if (twsk_unique(sk, sk2, twp)) goto unique; else @@ -258,9 +249,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, /* And established part... */ sk_nulls_for_each(sk2, node, &head->chain) { - if (sk2->sk_hash != hash) - continue; - if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) + if (INET6_MATCH(sk2, net, hash, saddr, daddr, ports, dif)) goto not_unique; } diff --git a/trunk/net/ipv6/netfilter/ip6_tables.c b/trunk/net/ipv6/netfilter/ip6_tables.c index 125a90d6a795..74cadd0719a5 100644 --- a/trunk/net/ipv6/netfilter/ip6_tables.c +++ b/trunk/net/ipv6/netfilter/ip6_tables.c @@ -2271,9 +2271,112 @@ static void __exit ip6_tables_fini(void) unregister_pernet_subsys(&ip6_tables_net_ops); } +/* + * find the offset to specified header or the protocol number of last header + * if target < 0. "last header" is transport protocol header, ESP, or + * "No next header". + * + * Note that *offset is used as input/output parameter. an if it is not zero, + * then it must be a valid offset to an inner IPv6 header. This can be used + * to explore inner IPv6 header, eg. ICMPv6 error messages. + * + * If target header is found, its offset is set in *offset and return protocol + * number. Otherwise, return -1. + * + * If the first fragment doesn't contain the final protocol header or + * NEXTHDR_NONE it is considered invalid. + * + * Note that non-1st fragment is special case that "the protocol number + * of last header" is "next header" field in Fragment header. In this case, + * *offset is meaningless and fragment offset is stored in *fragoff if fragoff + * isn't NULL. + * + * if flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG + * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set and + * target < 0, then this function will stop at the AH header. 
+ */ +int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, + int target, unsigned short *fragoff, int *flags) +{ + unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); + u8 nexthdr = ipv6_hdr(skb)->nexthdr; + unsigned int len; + + if (fragoff) + *fragoff = 0; + + if (*offset) { + struct ipv6hdr _ip6, *ip6; + + ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); + if (!ip6 || (ip6->version != 6)) { + printk(KERN_ERR "IPv6 header not found\n"); + return -EBADMSG; + } + start = *offset + sizeof(struct ipv6hdr); + nexthdr = ip6->nexthdr; + } + len = skb->len - start; + + while (nexthdr != target) { + struct ipv6_opt_hdr _hdr, *hp; + unsigned int hdrlen; + + if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { + if (target < 0) + break; + return -ENOENT; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (hp == NULL) + return -EBADMSG; + if (nexthdr == NEXTHDR_FRAGMENT) { + unsigned short _frag_off; + __be16 *fp; + + if (flags) /* Indicate that this is a fragment */ + *flags |= IP6T_FH_F_FRAG; + fp = skb_header_pointer(skb, + start+offsetof(struct frag_hdr, + frag_off), + sizeof(_frag_off), + &_frag_off); + if (fp == NULL) + return -EBADMSG; + + _frag_off = ntohs(*fp) & ~0x7; + if (_frag_off) { + if (target < 0 && + ((!ipv6_ext_hdr(hp->nexthdr)) || + hp->nexthdr == NEXTHDR_NONE)) { + if (fragoff) + *fragoff = _frag_off; + return hp->nexthdr; + } + return -ENOENT; + } + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0)) + break; + hdrlen = (hp->hdrlen + 2) << 2; + } else + hdrlen = ipv6_optlen(hp); + + nexthdr = hp->nexthdr; + len -= hdrlen; + start += hdrlen; + } + + *offset = start; + return nexthdr; +} + EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); +EXPORT_SYMBOL(ipv6_find_hdr); module_init(ip6_tables_init); module_exit(ip6_tables_fini); diff --git a/trunk/net/mac802154/tx.c b/trunk/net/mac802154/tx.c index 4e09d070995a..1a4df39c722e 100644 --- a/trunk/net/mac802154/tx.c +++ b/trunk/net/mac802154/tx.c @@ -85,7 +85,6 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, if (!(priv->phy->channels_supported[page] & (1 << chan))) { WARN_ON(1); - kfree_skb(skb); return NETDEV_TX_OK; } @@ -99,15 +98,13 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, } if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { - kfree_skb(skb); + dev_kfree_skb(skb); return NETDEV_TX_OK; } work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); - if (!work) { - kfree_skb(skb); + if (!work) return NETDEV_TX_BUSY; - } INIT_WORK(&work->work, mac802154_xmit_worker); work->skb = skb; diff --git a/trunk/net/mac802154/wpan.c b/trunk/net/mac802154/wpan.c index 1191039c2b1b..f30f6d4beea1 100644 --- a/trunk/net/mac802154/wpan.c +++ b/trunk/net/mac802154/wpan.c @@ -327,10 +327,8 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev) if (chan == MAC802154_CHAN_NONE || page >= WPAN_NUM_PAGES || - chan >= WPAN_NUM_CHANNELS) { - kfree_skb(skb); + chan >= WPAN_NUM_CHANNELS) return NETDEV_TX_OK; - } skb->skb_iif = dev->ifindex; dev->stats.tx_packets++; diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a59..fb45640dc1fb 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_core.c +++ b/trunk/net/netfilter/ipvs/ip_vs_core.c @@ -942,7 +942,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, /* Fragment header that is before ICMP header 
tells us that: * it's not an error message since they can't be fragmented. */ - if (ipvsh->flags & IP6_FH_F_FRAG) + if (ipvsh->flags & IP6T_FH_F_FRAG) return NF_DROP; IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n", @@ -1475,7 +1475,7 @@ static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, /* Fragment header that is before ICMP header tells us that: * it's not an error message since they can't be fragmented. */ - if (iph->flags & IP6_FH_F_FRAG) + if (iph->flags & IP6T_FH_F_FRAG) return NF_DROP; IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n", diff --git a/trunk/net/netfilter/xt_HMARK.c b/trunk/net/netfilter/xt_HMARK.c index 73b73f687c58..1686ca1b53a1 100644 --- a/trunk/net/netfilter/xt_HMARK.c +++ b/trunk/net/netfilter/xt_HMARK.c @@ -167,7 +167,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, const struct xt_hmark_info *info) { struct ipv6hdr *ip6, _ip6; - int flag = IP6_FH_F_AUTH; + int flag = IP6T_FH_F_AUTH; unsigned int nhoff = 0; u16 fragoff = 0; int nexthdr; @@ -177,7 +177,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, if (nexthdr < 0) return 0; /* No need to check for icmp errors on fragments */ - if ((flag & IP6_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) + if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) goto noicmp; /* Use inner header in case of ICMP errors */ if (get_inner6_hdr(skb, &nhoff)) { @@ -185,7 +185,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, if (ip6 == NULL) return -1; /* If AH present, use SPI like in ESP. */ - flag = IP6_FH_F_AUTH; + flag = IP6T_FH_F_AUTH; nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); if (nexthdr < 0) return -1; @@ -201,7 +201,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, if (t->proto == IPPROTO_ICMPV6) return 0; - if (flag & IP6_FH_F_FRAG) + if (flag & IP6T_FH_F_FRAG) return 0; hmark_set_tuple_ports(skb, nhoff, t, info); diff --git a/trunk/net/openvswitch/actions.c b/trunk/net/openvswitch/actions.c index ac2defeeba83..08114478cb85 100644 --- a/trunk/net/openvswitch/actions.c +++ b/trunk/net/openvswitch/actions.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -163,53 +162,6 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, *addr = new_addr; } -static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, - __be32 addr[4], const __be32 new_addr[4]) -{ - int transport_len = skb->len - skb_transport_offset(skb); - - if (l4_proto == IPPROTO_TCP) { - if (likely(transport_len >= sizeof(struct tcphdr))) - inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, - addr, new_addr, 1); - } else if (l4_proto == IPPROTO_UDP) { - if (likely(transport_len >= sizeof(struct udphdr))) { - struct udphdr *uh = udp_hdr(skb); - - if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { - inet_proto_csum_replace16(&uh->check, skb, - addr, new_addr, 1); - if (!uh->check) - uh->check = CSUM_MANGLED_0; - } - } - } -} - -static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, - __be32 addr[4], const __be32 new_addr[4], - bool recalculate_csum) -{ - if (recalculate_csum) - update_ipv6_checksum(skb, l4_proto, addr, new_addr); - - skb->rxhash = 0; - memcpy(addr, new_addr, sizeof(__be32[4])); -} - -static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc) -{ - nh->priority = tc >> 4; - nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4); -} - -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl) -{ - nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl 
& 0x000F0000) >> 16; - nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8; - nh->flow_lbl[2] = fl & 0x000000FF; -} - static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) { csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); @@ -243,47 +195,6 @@ static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) return 0; } -static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) -{ - struct ipv6hdr *nh; - int err; - __be32 *saddr; - __be32 *daddr; - - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct ipv6hdr)); - if (unlikely(err)) - return err; - - nh = ipv6_hdr(skb); - saddr = (__be32 *)&nh->saddr; - daddr = (__be32 *)&nh->daddr; - - if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) - set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, - ipv6_key->ipv6_src, true); - - if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { - unsigned int offset = 0; - int flags = IP6_FH_F_SKIP_RH; - bool recalc_csum = true; - - if (ipv6_ext_hdr(nh->nexthdr)) - recalc_csum = ipv6_find_hdr(skb, &offset, - NEXTHDR_ROUTING, NULL, - &flags) != NEXTHDR_ROUTING; - - set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr, - ipv6_key->ipv6_dst, recalc_csum); - } - - set_ipv6_tc(nh, ipv6_key->ipv6_tclass); - set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label)); - nh->hop_limit = ipv6_key->ipv6_hlimit; - - return 0; -} - /* Must follow make_writable() since that can move the skb data. */ static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) @@ -428,10 +339,6 @@ static int execute_set_action(struct sk_buff *skb, skb->priority = nla_get_u32(nested_attr); break; - case OVS_KEY_ATTR_SKB_MARK: - skb->mark = nla_get_u32(nested_attr); - break; - case OVS_KEY_ATTR_ETHERNET: err = set_eth_addr(skb, nla_data(nested_attr)); break; @@ -440,10 +347,6 @@ static int execute_set_action(struct sk_buff *skb, err = set_ipv4(skb, nla_data(nested_attr)); break; - case OVS_KEY_ATTR_IPV6: - err = set_ipv6(skb, nla_data(nested_attr)); - break; - case OVS_KEY_ATTR_TCP: err = set_tcp(skb, nla_data(nested_attr)); break; diff --git a/trunk/net/openvswitch/datapath.c b/trunk/net/openvswitch/datapath.c index f996db343247..4c4b62ccc7d7 100644 --- a/trunk/net/openvswitch/datapath.c +++ b/trunk/net/openvswitch/datapath.c @@ -208,7 +208,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) int error; int key_len; - stats = this_cpu_ptr(dp->stats_percpu); + stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); /* Extract flow from 'skb' into 'key'. 
*/ error = ovs_flow_extract(skb, p->port_no, &key, &key_len); @@ -282,7 +282,7 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, return 0; err: - stats = this_cpu_ptr(dp->stats_percpu); + stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); u64_stats_update_begin(&stats->sync); stats->n_lost++; @@ -479,10 +479,8 @@ static int validate_set(const struct nlattr *a, switch (key_type) { const struct ovs_key_ipv4 *ipv4_key; - const struct ovs_key_ipv6 *ipv6_key; case OVS_KEY_ATTR_PRIORITY: - case OVS_KEY_ATTR_SKB_MARK: case OVS_KEY_ATTR_ETHERNET: break; @@ -502,25 +500,6 @@ static int validate_set(const struct nlattr *a, break; - case OVS_KEY_ATTR_IPV6: - if (flow_key->eth.type != htons(ETH_P_IPV6)) - return -EINVAL; - - if (!flow_key->ip.proto) - return -EINVAL; - - ipv6_key = nla_data(ovs_key); - if (ipv6_key->ipv6_proto != flow_key->ip.proto) - return -EINVAL; - - if (ipv6_key->ipv6_frag != flow_key->ip.frag) - return -EINVAL; - - if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) - return -EINVAL; - - break; - case OVS_KEY_ATTR_TCP: if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; @@ -696,7 +675,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_flow_free; err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, - &flow->key.phy.skb_mark, &flow->key.phy.in_port, a[OVS_PACKET_ATTR_KEY]); if (err) @@ -716,7 +694,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) OVS_CB(packet)->flow = flow; packet->priority = flow->key.phy.priority; - packet->mark = flow->key.phy.skb_mark; rcu_read_lock(); dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); diff --git a/trunk/net/openvswitch/flow.c b/trunk/net/openvswitch/flow.c index c3294cebc4f2..733cbf49ed1f 100644 --- a/trunk/net/openvswitch/flow.c +++ b/trunk/net/openvswitch/flow.c @@ -604,7 +604,6 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, key->phy.priority = skb->priority; key->phy.in_port = in_port; - key->phy.skb_mark = skb->mark; skb_reset_mac_header(skb); @@ -690,8 +689,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, } } - } else if ((key->eth.type == htons(ETH_P_ARP) || - key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { + } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); @@ -804,7 +802,6 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ENCAP] = -1, [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), - [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32), [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), [OVS_KEY_ATTR_VLAN] = sizeof(__be16), [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), @@ -990,10 +987,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, } else { swkey->phy.in_port = DP_MAX_PORTS; } - if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) { - swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]); - attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK); - } /* Data attributes. 
*/ if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) @@ -1093,8 +1086,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (err) return err; } - } else if (swkey->eth.type == htons(ETH_P_ARP) || - swkey->eth.type == htons(ETH_P_RARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP)) { const struct ovs_key_arp *arp_key; if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) @@ -1121,8 +1113,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, /** * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. - * @priority: receives the skb priority - * @mark: receives the skb mark * @in_port: receives the extracted input port. * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. @@ -1132,7 +1122,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, * get the metadata, that is, the parts of the flow key that cannot be * extracted from the packet itself. */ -int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port, +int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, const struct nlattr *attr) { const struct nlattr *nla; @@ -1140,7 +1130,6 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port, *in_port = DP_MAX_PORTS; *priority = 0; - *mark = 0; nla_for_each_nested(nla, attr, rem) { int type = nla_type(nla); @@ -1159,10 +1148,6 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port, return -EINVAL; *in_port = nla_get_u32(nla); break; - - case OVS_KEY_ATTR_SKB_MARK: - *mark = nla_get_u32(nla); - break; } } } @@ -1184,10 +1169,6 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) goto nla_put_failure; - if (swkey->phy.skb_mark && - nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark)) - goto nla_put_failure; - nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) goto nla_put_failure; @@ -1241,8 +1222,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) ipv6_key->ipv6_tclass = swkey->ip.tos; ipv6_key->ipv6_hlimit = swkey->ip.ttl; ipv6_key->ipv6_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_ARP) || - swkey->eth.type == htons(ETH_P_RARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP)) { struct ovs_key_arp *arp_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); diff --git a/trunk/net/openvswitch/flow.h b/trunk/net/openvswitch/flow.h index a7bb60ff3b5b..14a324eb017b 100644 --- a/trunk/net/openvswitch/flow.h +++ b/trunk/net/openvswitch/flow.h @@ -43,7 +43,6 @@ struct sw_flow_actions { struct sw_flow_key { struct { u32 priority; /* Packet QoS priority. */ - u32 skb_mark; /* SKB mark. */ u16 in_port; /* Input switch port (or DP_MAX_PORTS). 
*/ } phy; struct { @@ -145,7 +144,6 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies); * ------ --- ------ ----- * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 - * OVS_KEY_ATTR_SKB_MARK 4 -- 4 8 * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype) * OVS_KEY_ATTR_8021Q 4 -- 4 8 @@ -155,14 +153,14 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies); * OVS_KEY_ATTR_ICMPV6 2 2 4 8 * OVS_KEY_ATTR_ND 28 -- 4 32 * ------------------------------------------------- - * total 152 + * total 144 */ -#define FLOW_BUFSIZE 152 +#define FLOW_BUFSIZE 144 int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, const struct nlattr *); -int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port, +int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, const struct nlattr *); #define MAX_ACTIONS_BUFSIZE (16 * 1024) diff --git a/trunk/net/openvswitch/vport-netdev.c b/trunk/net/openvswitch/vport-netdev.c index a9327e2e48ce..a9033481fa5e 100644 --- a/trunk/net/openvswitch/vport-netdev.c +++ b/trunk/net/openvswitch/vport-netdev.c @@ -114,15 +114,6 @@ static struct vport *netdev_create(const struct vport_parms *parms) return ERR_PTR(err); } -static void free_port_rcu(struct rcu_head *rcu) -{ - struct netdev_vport *netdev_vport = container_of(rcu, - struct netdev_vport, rcu); - - dev_put(netdev_vport->dev); - ovs_vport_free(vport_from_priv(netdev_vport)); -} - static void netdev_destroy(struct vport *vport) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); @@ -131,7 +122,10 @@ static void netdev_destroy(struct vport *vport) netdev_rx_handler_unregister(netdev_vport->dev); dev_set_promiscuity(netdev_vport->dev, -1); - call_rcu(&netdev_vport->rcu, free_port_rcu); + synchronize_rcu(); + + dev_put(netdev_vport->dev); + ovs_vport_free(vport); } const char *ovs_netdev_get_name(const struct vport *vport) diff --git a/trunk/net/openvswitch/vport-netdev.h b/trunk/net/openvswitch/vport-netdev.h index 6478079b3417..f7072a25c604 100644 --- a/trunk/net/openvswitch/vport-netdev.h +++ b/trunk/net/openvswitch/vport-netdev.h @@ -20,15 +20,12 @@ #define VPORT_NETDEV_H 1 #include -#include #include "vport.h" struct vport *ovs_netdev_get_vport(struct net_device *dev); struct netdev_vport { - struct rcu_head rcu; - struct net_device *dev; }; diff --git a/trunk/net/openvswitch/vport.c b/trunk/net/openvswitch/vport.c index 70af0bedbac4..03779e8a2622 100644 --- a/trunk/net/openvswitch/vport.c +++ b/trunk/net/openvswitch/vport.c @@ -333,7 +333,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) { struct vport_percpu_stats *stats; - stats = this_cpu_ptr(vport->percpu_stats); + stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); + u64_stats_update_begin(&stats->sync); stats->rx_packets++; stats->rx_bytes += skb->len; @@ -358,7 +359,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) if (likely(sent)) { struct vport_percpu_stats *stats; - stats = this_cpu_ptr(vport->percpu_stats); + stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); u64_stats_update_begin(&stats->sync); stats->tx_packets++; diff --git a/trunk/net/sctp/ipv6.c b/trunk/net/sctp/ipv6.c index f3f0f4dc31dd..ea14cb445295 100644 --- a/trunk/net/sctp/ipv6.c +++ b/trunk/net/sctp/ipv6.c @@ -345,7 +345,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } out: - if (!IS_ERR_OR_NULL(dst)) { + if (!IS_ERR(dst)) { struct rt6_info 
*rt; rt = (struct rt6_info *)dst; t->dst = dst; diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c index e92079d27eae..b6adef8a1e93 100644 --- a/trunk/net/sctp/sm_statefuns.c +++ b/trunk/net/sctp/sm_statefuns.c @@ -1055,7 +1055,6 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - sctp_paramhdr_t *param_hdr; struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; size_t paylen = 0; @@ -1073,17 +1072,12 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, * Information field copied from the received HEARTBEAT chunk. */ chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; - param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr; paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); - - if (ntohs(param_hdr->length) > paylen) - return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, - param_hdr, commands); - if (!pskb_pull(chunk->skb, paylen)) goto nomem; - reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen); + reply = sctp_make_heartbeat_ack(asoc, chunk, + chunk->subh.hb_hdr, paylen); if (!reply) goto nomem;
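Note on the inet lookup hunks above: both the removed and the restored 64-bit INET_MATCH()/INET_TW_MATCH() variants compare the address pair and the port pair as single packed words; what the revert drops is the dedicated skc_addrpair/skc_portpair union members in struct sock_common (falling back to pointer casts over the adjacent fields) and it moves the sk_hash test back inside the macros. The sketch below is a minimal userspace illustration of the underlying packing trick only, not kernel code: the names fake_sock, make_cookie and sock_matches are invented for illustration, and it ignores the endian-dependent ordering done by INET_ADDR_COOKIE(), since only equality matters for the demo.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

typedef uint64_t addrpair_t;

/* Pack destination and source IPv4 addresses into one 64-bit word so a
 * hash-chain walk can test both with a single comparison. */
static addrpair_t make_cookie(uint32_t daddr, uint32_t saddr)
{
	return ((addrpair_t)daddr << 32) | saddr;
}

/* Stand-in for the relevant part of struct sock_common. */
struct fake_sock {
	addrpair_t addrpair;
};

static int sock_matches(const struct fake_sock *sk, addrpair_t cookie)
{
	return sk->addrpair == cookie;	/* one compare instead of two */
}

int main(void)
{
	uint32_t daddr = inet_addr("192.0.2.1");
	uint32_t saddr = inet_addr("198.51.100.2");
	struct fake_sock sk = { .addrpair = make_cookie(daddr, saddr) };

	printf("same pair:      %d\n",
	       sock_matches(&sk, make_cookie(daddr, saddr)));
	printf("different pair: %d\n",
	       sock_matches(&sk, make_cookie(daddr, inet_addr("203.0.113.9"))));
	return 0;
}

Compiled with any C compiler this prints 1 for the matching pair and 0 otherwise. In the kernel the packed words aliased the individual fields through anonymous unions, which is why the sock_common comment in the hunk above insists that skc_daddr and skc_rcv_saddr stay grouped (and, in the removed version, 8-byte aligned); the sketch simply stores the packed value directly because that aliasing is exactly the part the revert removes.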