author	Auke Kok <auke-jan.h.kok@intel.com>	2006-07-14 16:14:23 -0700
committer	Auke Kok <auke-jan.h.kok@intel.com>	2006-07-14 16:14:23 -0700
commit	d3d9e484b2ca502c87156b69fa6b8f8fd5fa18a0 (patch)
tree	25cc750bb3a88599645c0415110e8ed9a6086f64 /drivers
parent	22e1170310ec6afa41e0dc7ac9dfac735d82dcab (diff)
e1000: Redo netpoll fix to address community concerns
The original suggested fix for netpoll was found to be racy on SMP kernels. While it is highly unlikely that this race would ever be seen in the real world due to current netpoll usage models, we implemented this updated fix to address concerns.

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
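The heart of the redo is visible in the second hunk below: whichever CPU wins adapter->tx_queue_lock cleans tx_ring[0], and a CPU that fails the trylock simply skips TX cleanup, since failure means another CPU is already cleaning the ring. A minimal userspace sketch of that pattern, assuming POSIX spinlocks; every name here (poll_cpu, clean_tx_ring, tx_ring_work) is illustrative scaffolding, not driver code:

/* Build with: gcc trylock_sketch.c -o trylock_sketch -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tx_queue_lock;
static int tx_ring_work = 8;	/* pretend descriptors awaiting reclaim */

static void clean_tx_ring(void)
{
	while (tx_ring_work > 0)
		tx_ring_work--;	/* reclaim one descriptor; only the lock holder runs this */
}

static void *poll_cpu(void *arg)
{
	/* Mirrors e1000_clean(): a failed trylock means another CPU
	 * is cleaning the ring right now, so just skip the TX work. */
	if (pthread_spin_trylock(&tx_queue_lock) == 0) {
		clean_tx_ring();
		pthread_spin_unlock(&tx_queue_lock);
		printf("cpu %ld cleaned the tx ring\n", (long)arg);
	} else {
		printf("cpu %ld skipped: ring already being cleaned\n", (long)arg);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	pthread_spin_init(&tx_queue_lock, PTHREAD_PROCESS_PRIVATE);
	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, poll_cpu, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	pthread_spin_destroy(&tx_queue_lock);
	return 0;
}

Losing the trylock is not an error here: the TX work is guaranteed to be done by the lock holder, so skipping avoids two CPUs walking the same descriptor ring.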
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/e1000/e1000_main.c	37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6d3d4193450381..1c6bcad5b910d0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3387,8 +3387,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-		__netif_rx_schedule(&adapter->polling_netdev[0]);
+	if (likely(netif_rx_schedule_prep(netdev)))
+		__netif_rx_schedule(netdev);
 	else
 		e1000_irq_enable(adapter);
 #else
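In the first hunk above, the interrupt handler now schedules polling on the real netdev rather than the dummy polling_netdev[0]. The prep/schedule split matters for the race: netif_rx_schedule_prep() boils down to an atomic test-and-set of the device's RX-scheduled state bit, so exactly one context wins the right to call __netif_rx_schedule() while losers fall through to e1000_irq_enable(). A sketch of that semantics, assuming C11 atomics (rx_schedule_prep is a stand-in, not the kernel helper):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag rx_sched = ATOMIC_FLAG_INIT;

/* Returns nonzero only for the one caller that flips the flag,
 * i.e. the one that won the right to schedule polling. */
static int rx_schedule_prep(void)
{
	return !atomic_flag_test_and_set(&rx_sched);
}

int main(void)
{
	if (rx_schedule_prep())
		printf("won the race: schedule polling\n");
	else
		printf("lost the race: re-enable interrupts instead\n");

	/* A second attempt while still scheduled loses, by design. */
	if (rx_schedule_prep())
		printf("this does not print while polling is pending\n");
	return 0;
}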
@@ -3431,34 +3431,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
 	struct e1000_adapter *adapter;
 	int work_to_do = min(*budget, poll_dev->quota);
-	int tx_cleaned = 0, i = 0, work_done = 0;
+	int tx_cleaned = 0, work_done = 0;
 
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
 	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(adapter->netdev))
+	if (!netif_carrier_ok(poll_dev))
 		goto quit_polling;
 
-	while (poll_dev != &adapter->polling_netdev[i]) {
-		i++;
-		BUG_ON(i == adapter->num_rx_queues);
+	/* e1000_clean is called per-cpu. This lock protects
+	 * tx_ring[0] from being cleaned by multiple cpus
+	 * simultaneously. A failure obtaining the lock means
+	 * tx_ring[0] is currently being cleaned anyway. */
+	if (spin_trylock(&adapter->tx_queue_lock)) {
+		tx_cleaned = e1000_clean_tx_irq(adapter,
+		                                &adapter->tx_ring[0]);
+		spin_unlock(&adapter->tx_queue_lock);
 	}
 
-	if (likely(adapter->num_tx_queues == 1)) {
-		/* e1000_clean is called per-cpu. This lock protects
-		 * tx_ring[0] from being cleaned by multiple cpus
-		 * simultaneously. A failure obtaining the lock means
-		 * tx_ring[0] is currently being cleaned anyway. */
-		if (spin_trylock(&adapter->tx_queue_lock)) {
-			tx_cleaned = e1000_clean_tx_irq(adapter,
-			                                &adapter->tx_ring[0]);
-			spin_unlock(&adapter->tx_queue_lock);
-		}
-	} else
-		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-	adapter->clean_rx(adapter, &adapter->rx_ring[i],
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
 	                  &work_done, work_to_do);
 
 	*budget -= work_done;
@@ -3466,7 +3458,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
 	if ((!tx_cleaned && (work_done == 0)) ||
-	   !netif_running(adapter->netdev)) {
+	   !netif_running(poll_dev)) {
 quit_polling:
 		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
@@ -4752,6 +4744,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
 	e1000_clean_tx_irq(adapter, adapter->tx_ring);
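The last hunk only adds a blank line after the declaration, but the surrounding lines show the netpoll pattern the fix has to coexist with: with the device's IRQ disabled, the interrupt handler is invoked synchronously and TX descriptors are reclaimed directly, because no interrupt will arrive to trigger cleanup. A userspace model of that flow, with all names (netpoll_model, fake_intr, fake_clean_tx) purely illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled = true;
static int tx_pending = 3;	/* pretend packets awaiting reclaim */

static void fake_intr(void)	/* stand-in for e1000_intr() */
{
	printf("handler ran synchronously, irq_enabled=%d\n", irq_enabled);
}

static void fake_clean_tx(void)	/* stand-in for e1000_clean_tx_irq() */
{
	while (tx_pending > 0)
		tx_pending--;	/* free transmitted buffers now */
}

static void netpoll_model(void)
{
	irq_enabled = false;	/* disable_irq(): no concurrent handler */
	fake_intr();		/* poll hardware state directly */
	fake_clean_tx();	/* reclaim TX without waiting for an IRQ */
	irq_enabled = true;	/* enable_irq() */
}

int main(void)
{
	netpoll_model();
	printf("tx_pending after netpoll: %d\n", tx_pending);
	return 0;
}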