From 851c5879141e8d288d59c9d62f5005ff29be335a Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:11 -0500
Subject: [PATCH] net: xmit lock owner cleanup

commit 543a589a00e831de55e5f67802b73033dc2dd964 in tip.

- __netif_tx_lock() is always passed 'current' as the lock owner, so
  eliminate this parameter and record the owner inside the function
  itself.

- likewise for HARD_TX_LOCK().

- add a netif_tx_lock_recursion() helper to replace the open-coded
  owner checks in net/core/dev.c and net/sched/sch_generic.c.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
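
[ Note: the sketch below is plain userspace C added for illustration;
  it is not part of the patch.  struct txq_lock, txq_lock(),
  txq_unlock() and txq_lock_recursion() are invented names.  It models
  the idiom the patch relies on: the value written to xmit_lock_owner
  is always the acquiring task, so the acquire helper can record the
  owner by itself, and recursion is detected by comparing the recorded
  owner against the calling task. ]

/*
 * Illustrative userspace analogue of the xmit_lock_owner idiom;
 * none of these names exist in the kernel tree.
 */
#include <pthread.h>
#include <stdio.h>

struct txq_lock {
	pthread_mutex_t lock;
	pthread_t owner;	/* meaningful only while 'owned' is set */
	int owned;
};

static struct txq_lock txq = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Like the new __netif_tx_lock(): the owner is recorded here rather
 * than passed in by the caller, because it is always the acquiring
 * task. */
static void txq_lock(struct txq_lock *q)
{
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self();
	q->owned = 1;
}

static void txq_unlock(struct txq_lock *q)
{
	q->owned = 0;
	pthread_mutex_unlock(&q->lock);
}

/* Like the new netif_tx_lock_recursion(): do we hold the lock already? */
static int txq_lock_recursion(struct txq_lock *q)
{
	return q->owned && pthread_equal(q->owner, pthread_self());
}

int main(void)
{
	txq_lock(&txq);
	/* A transmit path re-entering here would see 1 and could drop
	 * the packet instead of deadlocking on its own lock. */
	printf("recursion: %d\n", txq_lock_recursion(&txq));
	txq_unlock(&txq);
	printf("recursion: %d\n", txq_lock_recursion(&txq));
	return 0;
}

[ Built with 'gcc -pthread', this should print 1 while the lock is held
  by the calling thread and 0 after release, mirroring what
  netif_tx_lock_recursion() reports in the dev.c and sch_generic.c
  paths below. ]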
 drivers/net/bnx2.c                   |    2 +-
 drivers/net/mv643xx_eth.c            |    6 +++---
 drivers/net/netxen/netxen_nic_init.c |    2 +-
 drivers/net/niu.c                    |    2 +-
 include/linux/netdevice.h            |   24 ++++++++++++++----------
 net/core/dev.c                       |    4 ++--
 net/core/netpoll.c                   |    2 +-
 net/sched/sch_generic.c              |    4 ++--
 8 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b398c4d..58e66a7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2858,7 +2858,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 	if (unlikely(netif_tx_queue_stopped(txq)) &&
 	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if ((netif_tx_queue_stopped(txq)) &&
 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
 			netif_tx_wake_queue(txq);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 89a155f..5644220 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (netif_tx_queue_stopped(nq)) {
-		__netif_tx_lock(nq, (void *)current);
+		__netif_tx_lock(nq);
 		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 			netif_tx_wake_queue(nq);
 		__netif_tx_unlock(nq);
@@ -901,7 +901,7 @@ static void txq_kick(struct tx_queue *txq)
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	__netif_tx_lock(nq, (void *)current);
+	__netif_tx_lock(nq);
 
 	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 		goto out;
@@ -925,7 +925,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	__netif_tx_lock(nq, (void *)current);
+	__netif_tx_lock(nq);
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f5..8fcc0f8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1629,7 +1629,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	smp_mb();
 
 	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-		__netif_tx_lock(tx_ring->txq, smp_processor_id());
+		__netif_tx_lock(tx_ring->txq);
 		if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
 			netif_wake_queue(netdev);
 			adapter->tx_timeo_cnt = 0;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 43f4469..ab05c03 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3647,7 +3647,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if (netif_tx_queue_stopped(txq) &&
 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 			netif_tx_wake_queue(txq);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bd1ac59..a92de08 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1781,10 +1781,18 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
+static inline void __netif_tx_lock(struct netdev_queue *txq)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = curr;
+	txq->xmit_lock_owner = (void *)current;
+}
+
+/*
+ * Do we hold the xmit_lock already?
+ */
+static inline int netif_tx_lock_recursion(struct netdev_queue *txq)
+{
+	return txq->xmit_lock_owner == (void *)current;
 }
 
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
@@ -1828,10 +1836,8 @@ static inline void txq_trans_update(struct netdev_queue *txq)
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	unsigned int i;
-	void *curr;
 
 	spin_lock(&dev->tx_global_lock);
-	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
@@ -1841,7 +1847,7 @@ static inline void netif_tx_lock(struct net_device *dev)
 		 * the ->hard_start_xmit() handler and already
 		 * checked the frozen bit.
 		 */
-		__netif_tx_lock(txq, curr);
+		__netif_tx_lock(txq);
 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 		__netif_tx_unlock(txq);
 	}
@@ -1876,9 +1882,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, txq, curr) {			\
+#define HARD_TX_LOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(txq, curr);		\
+		__netif_tx_lock(txq);			\
 	}						\
 }
 
@@ -1891,14 +1897,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
-	void *curr;
 
 	local_bh_disable();
-	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
-		__netif_tx_lock(txq, curr);
+		__netif_tx_lock(txq);
 		netif_tx_stop_queue(txq);
 		__netif_tx_unlock(txq);
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 90dd59b..953e15b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2133,9 +2133,9 @@ gso:
 	/*
 	 * No need to check for recursion with threaded interrupts:
 	 */
-	if (txq->xmit_lock_owner != (void *)current) {
+	if (!netif_tx_lock_recursion(txq)) {
 
-		HARD_TX_LOCK(dev, txq, (void *)current);
+		HARD_TX_LOCK(dev, txq);
 
 		if (!netif_tx_queue_stopped(txq)) {
 			rc = dev_hard_start_xmit(skb, dev, txq);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 1ef0746..696684c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -75,7 +75,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save_nort(flags);
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if (netif_tx_queue_stopped(txq) ||
 		    netif_tx_queue_frozen(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e90a2c6..47834d8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -79,7 +79,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 {
 	int ret;
 
-	if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
+	if (unlikely(netif_tx_lock_recursion(dev_queue))) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	HARD_TX_LOCK(dev, txq, (void *)current);
+	HARD_TX_LOCK(dev, txq);
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
-- 
1.7.0.4