author     David S. Miller <davem@davemloft.net>  2021-03-09 16:14:54 -0800
committer  David S. Miller <davem@davemloft.net>  2021-03-09 16:14:54 -0800
commit     8515455720c52a0841bd1c9c5f457c9616900110
tree       c2e5f24784164916d6bbace9556bfd57111a4882
parent     b005c9ef5adaf1357b7faa977330eaae18647300
parent     7eefda7f353ef86ad82a2dc8329e8a3538c08ab6
download   sparc-8515455720c52a0841bd1c9c5f457c9616900110.tar.gz
Merge branch 's390-qeth-fixes'
Julian Wiedmann says:

====================
s390/qeth: fixes 2021-03-09

please apply the following patch series to netdev's net tree.
This brings one fix for a memleak in an error path of the setup code.
Also several fixes for dealing with pending TX buffers - two for old
bugs in their completion handling, and one recent regression in a
teardown path.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
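For readers skimming the merge: the core of the pending-TX-buffer fixes is replacing the per-buffer next_pending chain with a per-queue pending_bufs list that is reaped from the TX NAPI poll and drained unconditionally on teardown. Below is a minimal, hypothetical user-space sketch of that reap-or-drain pattern; the names (out_queue, complete_pending_bufs) and the plain singly linked list are illustrative only, not the driver's actual list_head-based code.

/*
 * Hypothetical user-space model of the pending-buffer handling this
 * series moves qeth to: pending buffers sit on a per-queue list and
 * are reaped either once they complete (state == BUF_EMPTY) or
 * unconditionally when the queue is drained. Not the driver's code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum buf_state { BUF_PENDING, BUF_EMPTY };

struct out_buffer {
	enum buf_state state;
	int id;
	struct out_buffer *next;	/* singly linked pending list */
};

struct out_queue {
	struct out_buffer *pending;	/* head of the pending-buffer list */
};

/* Reap pending buffers; with drain=true, free everything regardless of state. */
static void complete_pending_bufs(struct out_queue *q, bool drain)
{
	struct out_buffer **link = &q->pending;

	while (*link) {
		struct out_buffer *buf = *link;

		if (drain || buf->state == BUF_EMPTY) {
			*link = buf->next;	/* unlink before freeing */
			printf("reaping buffer %d (drain=%d)\n", buf->id, drain);
			free(buf);
		} else {
			link = &buf->next;	/* still pending, keep it */
		}
	}
}

int main(void)
{
	struct out_queue q = { .pending = NULL };

	for (int i = 0; i < 3; i++) {
		struct out_buffer *buf = malloc(sizeof(*buf));

		buf->id = i;
		buf->state = (i == 1) ? BUF_EMPTY : BUF_PENDING;
		buf->next = q.pending;
		q.pending = buf;
	}

	complete_pending_bufs(&q, false);	/* reaps only the completed buffer */
	complete_pending_bufs(&q, true);	/* teardown: reaps the rest */
	return 0;
}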
-rw-r--r--  drivers/s390/net/qeth_core.h      |   3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 128
2 files changed, 62 insertions(+), 69 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a1da83b0b0ef3..91acff493612a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -436,7 +436,7 @@ struct qeth_qdio_out_buffer {
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
struct qeth_qdio_out_q *q;
- struct qeth_qdio_out_buffer *next_pending;
+ struct list_head list_entry;
};
struct qeth_card;
@@ -500,6 +500,7 @@ struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ struct list_head pending_bufs;
struct qeth_out_q_stats stats;
spinlock_t lock;
unsigned int priority;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b71b8902d1c49..a814698387bc3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,8 +73,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
- int budget);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -465,41 +463,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
-static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
- int forced_cleanup)
-{
- if (q->card->options.cq != QETH_CQ_ENABLED)
- return;
-
- if (q->bufs[bidx]->next_pending != NULL) {
- struct qeth_qdio_out_buffer *head = q->bufs[bidx];
- struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
-
- while (c) {
- if (forced_cleanup ||
- atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) {
- struct qeth_qdio_out_buffer *f = c;
-
- QETH_CARD_TEXT(f->q->card, 5, "fp");
- QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
- /* release here to avoid interleaving between
- outbound tasklet and inbound tasklet
- regarding notifications and lifecycle */
- qeth_tx_complete_buf(c, forced_cleanup, 0);
-
- c = f->next_pending;
- WARN_ON_ONCE(head->next_pending != f);
- head->next_pending = c;
- kmem_cache_free(qeth_qdio_outbuf_cache, f);
- } else {
- head = c;
- c = c->next_pending;
- }
-
- }
- }
-}
-
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
@@ -507,6 +470,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
+ struct qeth_qdio_out_q *queue;
unsigned int i;
aob = (struct qaob *) phys_to_virt(phys_aob_addr);
@@ -537,7 +501,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
qeth_notify_skbs(buffer->q, buffer, notification);
/* Free dangling allocations. The attached skbs are handled by
- * qeth_cleanup_handled_pending().
+ * qeth_tx_complete_pending_bufs().
*/
for (i = 0;
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
@@ -549,7 +513,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
buffer->is_header[i] = 0;
}
+ queue = buffer->q;
atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
+ napi_schedule(&queue->napi);
break;
default:
WARN_ON_ONCE(1);
@@ -1424,9 +1390,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
- if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
- qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
-
/* Empty buffer? */
if (buf->next_element_to_fill == 0)
return;
@@ -1488,14 +1451,38 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
+static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue,
+ bool drain)
+{
+ struct qeth_qdio_out_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
+ if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+ QETH_CARD_TEXT(card, 5, "fp");
+ QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
+
+ if (drain)
+ qeth_notify_skbs(queue, buf,
+ TX_NOTIFY_GENERALERROR);
+ qeth_tx_complete_buf(buf, drain, 0);
+
+ list_del(&buf->list_entry);
+ kmem_cache_free(qeth_qdio_outbuf_cache, buf);
+ }
+ }
+}
+
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
+ qeth_tx_complete_pending_bufs(q->card, q, true);
+
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, 1);
+
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -2615,7 +2602,6 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q;
- newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
return 0;
@@ -2634,15 +2620,28 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+ unsigned int i;
if (!q)
return NULL;
- if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
- kfree(q);
- return NULL;
+ if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
+ goto err_qdio_bufs;
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+ if (qeth_init_qdio_out_buf(q, i))
+ goto err_out_bufs;
}
+
return q;
+
+err_out_bufs:
+ while (i > 0)
+ kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
+ qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+err_qdio_bufs:
+ kfree(q);
+ return NULL;
}
static void qeth_tx_completion_timer(struct timer_list *timer)
@@ -2655,7 +2654,7 @@ static void qeth_tx_completion_timer(struct timer_list *timer)
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
- int i, j;
+ unsigned int i;
QETH_CARD_TEXT(card, 2, "allcqdbf");
@@ -2684,18 +2683,12 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
card->qdio.out_qs[i] = queue;
queue->card = card;
queue->queue_no = i;
+ INIT_LIST_HEAD(&queue->pending_bufs);
spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
-
- /* give outbound qeth_qdio_buffers their qdio_buffers */
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- WARN_ON(queue->bufs[j]);
- if (qeth_init_qdio_out_buf(queue, j))
- goto out_freeoutqbufs;
- }
}
/* completion */
@@ -2704,13 +2697,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
return 0;
-out_freeoutqbufs:
- while (j > 0) {
- --j;
- kmem_cache_free(qeth_qdio_outbuf_cache,
- card->qdio.out_qs[i]->bufs[j]);
- card->qdio.out_qs[i]->bufs[j] = NULL;
- }
out_freeoutq:
while (i > 0) {
qeth_free_output_queue(card->qdio.out_qs[--i]);
@@ -6107,6 +6093,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
qeth_schedule_recovery(card);
}
+ list_add(&buffer->list_entry,
+ &queue->pending_bufs);
/* Skip clearing the buffer: */
return;
case QETH_QDIO_BUF_QAOB_OK:
@@ -6162,6 +6150,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int bytes = 0;
int completed;
+ qeth_tx_complete_pending_bufs(card, queue, false);
+
if (qeth_out_queue_is_empty(queue)) {
napi_complete(napi);
return 0;
@@ -6194,7 +6184,6 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
qeth_handle_send_error(card, buffer, error);
qeth_iqd_tx_complete(queue, bidx, error, budget);
- qeth_cleanup_handled_pending(queue, bidx, false);
}
netdev_tx_completed_queue(txq, packets, bytes);
@@ -7249,9 +7238,7 @@ int qeth_open(struct net_device *dev)
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
- napi_enable(&card->napi);
local_bh_disable();
- napi_schedule(&card->napi);
if (IS_IQD(card)) {
struct qeth_qdio_out_q *queue;
unsigned int i;
@@ -7263,8 +7250,12 @@ int qeth_open(struct net_device *dev)
napi_schedule(&queue->napi);
}
}
+
+ napi_enable(&card->napi);
+ napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
@@ -7274,6 +7265,11 @@ int qeth_stop(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "qethstop");
+
+ napi_disable(&card->napi);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
+ qdio_stop_irq(CARD_DDEV(card));
+
if (IS_IQD(card)) {
struct qeth_qdio_out_q *queue;
unsigned int i;
@@ -7294,10 +7290,6 @@ int qeth_stop(struct net_device *dev)
netif_tx_disable(dev);
}
- napi_disable(&card->napi);
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
- qdio_stop_irq(CARD_DDEV(card));
-
return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);