cns3xxx: ethernet - resolve SMP issue

The combination of r35942 and r35952 causes an issue where eth_schedule_poll()
can be called from a different CPU between the call to napi_complete() and the
setting of cur_index which can break the rx ring accounting and cause ethernet
latency and/or ethernet stalls.  The issue can be easily created by adding
a couple of artificial delays such as:

@@ -715,6 +715,7 @@ static int eth_poll(struct napi_struct *napi, int budget)

 	if (!received) {
 		napi_complete(napi);
+udelay(1000);
 		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
 	}

@@ -727,6 +728,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 	rx_ring->cur_index = i;

 	wmb();
+udelay(1000);
 	enable_rx_dma(sw);

 	return received;

This patch moves the setting of cur_index back up where it needs to be and
addresses the original corner case that r35942 was trying to catch in an
improved fashion by checking to see if the rx descriptor ring has become
full before interrupts were re-enabled so that a poll can be scheduled again
and avoid an rx stall caused by rx interrupts ceasing to fire again.

Signed-off-by: Tim Harvey <tharvey@gateworks.com>

SVN-Revision: 39761
lede-17.01
Felix Fietkau 2014-02-27 23:02:37 +00:00
parent f7f117b88d
commit 96eb3d883d
1 changed file with 5 additions and 2 deletions

View File

@@ -713,9 +713,14 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		}
 	}
+	rx_ring->cur_index = i;
+
 	if (!received) {
 		napi_complete(napi);
 		enable_irq(IRQ_CNS3XXX_SW_R0RXC);
+		/* if rx descriptors are full schedule another poll */
+		if (rx_ring->desc[(i-1) & (RX_DESCS-1)].cown)
+			eth_schedule_poll(sw);
 	}
 
 	spin_lock_bh(&tx_lock);
@@ -724,8 +729,6 @@ static int eth_poll(struct napi_struct *napi, int budget)
 	cns3xxx_alloc_rx_buf(sw, received);
-	rx_ring->cur_index = i;
-
 	wmb();
 	enable_rx_dma(sw);