xen/netback: fix rx queue stall detection
drivers/net/xen-netback/rx.c
/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
                                       const struct sk_buff *skb)
{
        unsigned int needed = 0;

        if (skb) {
                needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
                if (skb_is_gso(skb))
                        needed++;
                if (skb->sw_hash)
                        needed++;
        }

        WRITE_ONCE(queue->rx_slots_needed, needed);
}

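/*
 * Check whether the frontend has posted enough request slots for the
 * skb at the head of the queue (rx_slots_needed).  If not, arm the
 * ring's req_event so the frontend raises an event when more requests
 * arrive, and re-check req_prod to close the race with that update.
 */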
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        unsigned int needed;

        needed = READ_ONCE(queue->rx_slots_needed);
        if (!needed)
                return false;

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}

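/*
 * Queue an skb for transmission to the frontend.  The transmit queue is
 * stopped once the amount of queued data exceeds rx_queue_max; it is
 * woken again by xenvif_rx_dequeue() when enough data has drained.
 */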
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        if (skb_queue_empty(&queue->rx_queue))
                xenvif_update_needed_slots(queue, skb);

        __skb_queue_tail(&queue->rx_queue, skb);

        queue->rx_queue_len += skb->len;
        if (queue->rx_queue_len > queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;

                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
        }

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

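/*
 * Remove the skb at the head of the queue, update rx_slots_needed for
 * the new head, and wake the transmit queue if enough data has drained.
 */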
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
                xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
                        netif_tx_wake_queue(txq);
                }
        }

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}

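/* Drop skbs from the head of the queue whose expiry time has passed. */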
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
        }
}

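/*
 * Issue the batched grant copy operations, patch the status of any
 * response whose copy failed, push the responses to the frontend
 * (notifying it if required) and free the completed skbs.
 */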
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
        unsigned int i;
        int notify;

        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

        for (i = 0; i < queue->rx_copy.num; i++) {
                struct gnttab_copy *op;

                op = &queue->rx_copy.op[i];

                /* If the copy failed, overwrite the status field in
                 * the corresponding response.
                 */
                if (unlikely(op->status != GNTST_okay)) {
                        struct xen_netif_rx_response *rsp;

                        rsp = RING_GET_RESPONSE(&queue->rx,
                                                queue->rx_copy.idx[i]);
                        rsp->status = op->status;
                }
        }

        queue->rx_copy.num = 0;

        /* Push responses for all completed packets. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);

        __skb_queue_purge(queue->rx_copy.completed);
}

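/*
 * Add one grant copy operation to the batch, copying a chunk of packet
 * data into the page granted by the given rx request.  The batch is
 * flushed first if it is already full.
 */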
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
        struct gnttab_copy *op;
        struct page *page;
        struct xen_page_foreign *foreign;

        if (queue->rx_copy.num == COPY_BATCH_SIZE)
                xenvif_rx_copy_flush(queue);

        op = &queue->rx_copy.op[queue->rx_copy.num];

        page = virt_to_page(data);

        op->flags = GNTCOPY_dest_gref;

        foreign = xen_page_foreign(page);
        if (foreign) {
                op->source.domid = foreign->domid;
                op->source.u.ref = foreign->gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                op->source.u.gmfn = virt_to_gfn(data);
                op->source.domid = DOMID_SELF;
        }

        op->source.offset = xen_offset_in_page(data);
        op->dest.u.ref = req->gref;
        op->dest.domid = queue->vif->domid;
        op->dest.offset = offset;
        op->len = len;

        queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
        queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        return XEN_NETIF_GSO_TYPE_TCPV4;
                else
                        return XEN_NETIF_GSO_TYPE_TCPV6;
        }
        return XEN_NETIF_GSO_TYPE_NONE;
}

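/*
 * Per-packet state tracked while an skb is transferred to the frontend:
 * the current fragment (or linear area when frag == -1), the offset
 * within it, any extra info segments still to be sent, and the number
 * of ring slots consumed so far.
 */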
struct xenvif_pkt_state {
        struct sk_buff *skb;
        size_t remaining_len;
        struct sk_buff *frag_iter;
        int frag; /* frag == -1 => frag_iter->head */
        unsigned int frag_offset;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
        unsigned int extra_count;
        unsigned int slot;
};

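/*
 * Dequeue the next skb and initialise the packet state for it,
 * preparing GSO, XDP headroom and hash extra info segments as needed.
 */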
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        struct sk_buff *skb;
        unsigned int gso_type;

        skb = xenvif_rx_dequeue(queue);

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        /* Reset packet state. */
        memset(pkt, 0, sizeof(struct xenvif_pkt_state));

        pkt->skb = skb;
        pkt->frag_iter = skb;
        pkt->remaining_len = skb->len;
        pkt->frag = -1;

        gso_type = xenvif_gso_type(skb);
        if ((1 << gso_type) & queue->vif->gso_mask) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                extra->u.gso.type = gso_type;
                extra->u.gso.size = skb_shinfo(skb)->gso_size;
                extra->u.gso.pad = 0;
                extra->u.gso.features = 0;
                extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (queue->vif->xdp_headroom) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

                memset(extra, 0, sizeof(struct xen_netif_extra_info));
                extra->u.xdp.headroom = queue->vif->xdp_headroom;
                extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                extra->u.hash.algorithm =
                        XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

                if (skb->l4_hash)
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
                else
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

                *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

                extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
                extra->flags = 0;

                pkt->extra_count++;
        }
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;

        __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

        pkt->frag++;
        pkt->frag_offset = 0;

        if (pkt->frag >= nr_frags) {
                if (frag_iter == pkt->skb)
                        pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
                else
                        pkt->frag_iter = frag_iter->next;

                pkt->frag = -1;
        }
}

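/*
 * Return the next contiguous chunk of packet data, limited both by the
 * space left in the current XEN_PAGE_SIZE destination slot and by the
 * page boundary of the source data.
 */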
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        void *frag_data;
        size_t frag_len, chunk_len;

        BUG_ON(!frag_iter);

        if (pkt->frag == -1) {
                frag_data = frag_iter->data;
                frag_len = skb_headlen(frag_iter);
        } else {
                skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

                frag_data = skb_frag_address(frag);
                frag_len = skb_frag_size(frag);
        }

        frag_data += pkt->frag_offset;
        frag_len -= pkt->frag_offset;

        chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
        chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                             xen_offset_in_page(frag_data));

        pkt->frag_offset += chunk_len;

        /* Advance to next frag? */
        if (frag_len == chunk_len)
                xenvif_rx_next_frag(pkt);

        *data = frag_data;
        *len = chunk_len;
}

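/*
 * Fill one data slot: queue grant copies for as many chunks as fit in
 * the slot, then complete the response with the appropriate flags
 * (checksum and extra-info flags are only set on the first slot).
 */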
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
        unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;

        do {
                size_t len;
                void *data;

                xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
                xenvif_rx_copy_add(queue, req, offset, data, len);

                offset += len;
                pkt->remaining_len -= len;

        } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

        if (pkt->remaining_len > 0)
                flags = XEN_NETRXF_more_data;
        else
                flags = 0;

        if (pkt->slot == 0) {
                struct sk_buff *skb = pkt->skb;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flags |= XEN_NETRXF_csum_blank |
                                 XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        flags |= XEN_NETRXF_data_validated;

                if (pkt->extra_count != 0)
                        flags |= XEN_NETRXF_extra_info;
        }

        rsp->offset = 0;
        rsp->flags = flags;
        rsp->id = req->id;
        rsp->status = (s16)offset;
}

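/*
 * Fill one slot with the next pending extra info segment, setting
 * XEN_NETIF_EXTRA_FLAG_MORE when further extras follow.
 */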
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
        struct xen_netif_extra_info *extra = (void *)rsp;
        unsigned int i;

        pkt->extra_count--;

        for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
                if (pkt->extras[i].type) {
                        *extra = pkt->extras[i];

                        if (pkt->extra_count != 0)
                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

                        pkt->extras[i].type = 0;
                        return;
                }
        }
        BUG();
}

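/*
 * Transmit one skb to the frontend, consuming one rx request per slot
 * until all data and extra info segments have been written.
 */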
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
        struct xenvif_pkt_state pkt;

        xenvif_rx_next_skb(queue, &pkt);

        queue->last_rx_time = jiffies;

        do {
                struct xen_netif_rx_request *req;
                struct xen_netif_rx_response *rsp;

                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
                rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

                /* Extras must go after the first data slot */
                if (pkt.slot != 0 && pkt.extra_count != 0)
                        xenvif_rx_extra_slot(queue, &pkt, req, rsp);
                else
                        xenvif_rx_data_slot(queue, &pkt, req, rsp);

                queue->rx.req_cons++;
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

        xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

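/*
 * Process queued skbs while ring slots are available, bounded by
 * RX_BATCH_SIZE, then flush any pending grant copies and free the
 * completed skbs.
 */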
void xenvif_rx_action(struct xenvif_queue *queue)
{
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;

        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;

        while (xenvif_rx_ring_slots_available(queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }

        /* Flush any pending copies and complete all skbs. */
        xenvif_rx_copy_flush(queue);
}

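/*
 * Stall detection: a queue is considered stalled if the frontend has
 * not posted enough rx request slots for the skb at the head of the
 * queue within stall_timeout, and ready again once it has.
 */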
static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return prod - cons;
}

static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return !queue->stalled &&
                xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

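/*
 * Return true if the rx kthread has something to do: ring slots are
 * available, the stall state needs updating, the thread should stop
 * (when test_kthread is set), or the vif has been disabled.
 */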
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
                (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
}

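/*
 * Return how long the kthread may sleep before the skb at the head of
 * the queue expires (or indefinitely if the queue is empty).
 */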
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
        struct sk_buff *skb;
        long timeout;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
        return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
        DEFINE_WAIT(wait);

        if (xenvif_have_rx_work(queue, true))
                return;

        for (;;) {
                long ret;

                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
                if (xenvif_have_rx_work(queue, true))
                        break;
                if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                        &queue->eoi_pending) &
                    (NETBK_RX_EOI | NETBK_COMMON_EOI))
                        xen_irq_lateeoi(queue->rx_irq, 0);

                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
        }
        finish_wait(&queue->wq, &wait);
}

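/*
 * Carrier management: the carrier is turned off when any queue stalls
 * and turned back on only once every stalled queue is ready again.
 */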
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* At least one queue has stalled? Disable the carrier. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                netdev_info(vif->dev, "Guest Rx stalled");
                netif_carrier_off(vif->dev);
        }
        spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
        queue->stalled = false;

        /* All queues are ready? Enable the carrier. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                netdev_info(vif->dev, "Guest Rx ready");
                netif_carrier_on(vif->dev);
        }
        spin_unlock(&vif->lock);
}

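/*
 * Per-queue kernel thread that transmits queued skbs to the frontend,
 * handling stall detection and dropping expired packets.
 */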
int xenvif_kthread_guest_rx(void *data)
{
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;

        if (!vif->stall_timeout)
                xenvif_queue_carrier_on(queue);

        for (;;) {
                xenvif_wait_for_rx_work(queue);

                if (kthread_should_stop())
                        break;

                /* This frontend is found to be rogue, disable it in
                 * kthread context. Currently this is only set when
                 * netback finds out frontend sends malformed packet,
                 * but we cannot disable the interface in softirq
                 * context so we defer it here, if this thread is
                 * associated with queue 0.
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
                        break;
                }

                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);

                /* If the guest hasn't provided any Rx slots for a
                 * while it's probably not responsive, drop the
                 * carrier so packets are dropped earlier.
                 */
                if (vif->stall_timeout) {
                        if (xenvif_rx_queue_stalled(queue))
                                xenvif_queue_carrier_off(queue);
                        else if (xenvif_rx_queue_ready(queue))
                                xenvif_queue_carrier_on(queue);
                }

                /* Queued packets may have foreign pages from other
                 * domains. These cannot be queued indefinitely as
                 * this would starve guests of grant refs and transmit
                 * slots.
                 */
                xenvif_rx_queue_drop_expired(queue);

                cond_resched();
        }

        /* Bin any remaining skbs */
        xenvif_rx_queue_purge(queue);

        return 0;
}