/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
                                       const struct sk_buff *skb)
{
        unsigned int needed = 0;

        if (skb) {
                needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
                if (skb_is_gso(skb))
                        needed++;
                if (skb->sw_hash)
                        needed++;
        }

        WRITE_ONCE(queue->rx_slots_needed, needed);
}

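/* Are there enough posted rx requests for the packet at the head of
 * the queue? If not, ask the frontend for an event on the next request
 * (req_event) and check once more in case a request raced in.
 */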
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        unsigned int needed;

        needed = READ_ONCE(queue->rx_slots_needed);
        if (!needed)
                return false;

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}

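/* Queue an skb for transmission to the frontend. If the queue already
 * holds rx_queue_max bytes the packet is dropped and the matching
 * netdev tx queue is stopped until xenvif_rx_dequeue() frees up space.
 */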
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        if (queue->rx_queue_len >= queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;

                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        } else {
                if (skb_queue_empty(&queue->rx_queue))
                        xenvif_update_needed_slots(queue, skb);

                __skb_queue_tail(&queue->rx_queue, skb);

                queue->rx_queue_len += skb->len;
        }

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

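/* Remove the skb at the head of the queue, update the slot estimate
 * for the new head, and wake the netdev tx queue once the backlog
 * drops below rx_queue_max again.
 */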
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
                xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
                        netif_tx_wake_queue(txq);
                }
        }

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}

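/* Free every skb still on the queue, e.g. when the kthread stops. */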
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}

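/* Drop packets that have been queued past their expiry time; see the
 * comment in xenvif_kthread_guest_rx() for why they cannot be kept
 * indefinitely.
 */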
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        }
}

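/* Issue the batched grant copies, fix up the status of any response
 * whose copy failed, push the responses to the frontend (notifying it
 * if necessary) and free the skbs completed so far.
 */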
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
        unsigned int i;
        int notify;

        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

        for (i = 0; i < queue->rx_copy.num; i++) {
                struct gnttab_copy *op;

                op = &queue->rx_copy.op[i];

                /* If the copy failed, overwrite the status field in
                 * the corresponding response.
                 */
                if (unlikely(op->status != GNTST_okay)) {
                        struct xen_netif_rx_response *rsp;

                        rsp = RING_GET_RESPONSE(&queue->rx,
                                                queue->rx_copy.idx[i]);
                        rsp->status = op->status;
                }
        }

        queue->rx_copy.num = 0;

        /* Push responses for all completed packets. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);

        __skb_queue_purge(queue->rx_copy.completed);
}

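/* Add one grant copy operation for a chunk of packet data, flushing
 * the batch first if it is full. Data in pages granted by another
 * domain is copied grant-to-grant rather than from a local gmfn.
 */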
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
        struct gnttab_copy *op;
        struct page *page;
        struct xen_page_foreign *foreign;

        if (queue->rx_copy.num == COPY_BATCH_SIZE)
                xenvif_rx_copy_flush(queue);

        op = &queue->rx_copy.op[queue->rx_copy.num];

        page = virt_to_page(data);

        op->flags = GNTCOPY_dest_gref;

        foreign = xen_page_foreign(page);
        if (foreign) {
                op->source.domid = foreign->domid;
                op->source.u.ref = foreign->gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                op->source.u.gmfn = virt_to_gfn(data);
                op->source.domid = DOMID_SELF;
        }

        op->source.offset = xen_offset_in_page(data);
        op->dest.u.ref = req->gref;
        op->dest.domid = queue->vif->domid;
        op->dest.offset = offset;
        op->len = len;

        queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
        queue->rx_copy.num++;
}

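/* Map the skb's GSO type onto the equivalent netif GSO type. */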
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        return XEN_NETIF_GSO_TYPE_TCPV4;
                else
                        return XEN_NETIF_GSO_TYPE_TCPV6;
        }
        return XEN_NETIF_GSO_TYPE_NONE;
}

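/* State of the packet currently being sliced into ring slots; it walks
 * the skb's linear data, frags and frag_list members in turn.
 */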
struct xenvif_pkt_state {
        struct sk_buff *skb;
        size_t remaining_len;
        struct sk_buff *frag_iter;
        int frag; /* frag == -1 => frag_iter->head */
        unsigned int frag_offset;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
        unsigned int extra_count;
        unsigned int slot;
};

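/* Dequeue the next skb, reset the packet state and prepare any GSO,
 * XDP-headroom and hash extra-info segments the packet will need.
 */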
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        struct sk_buff *skb;
        unsigned int gso_type;

        skb = xenvif_rx_dequeue(queue);

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        /* Reset packet state. */
        memset(pkt, 0, sizeof(struct xenvif_pkt_state));

        pkt->skb = skb;
        pkt->frag_iter = skb;
        pkt->remaining_len = skb->len;
        pkt->frag = -1;

        gso_type = xenvif_gso_type(skb);
        if ((1 << gso_type) & queue->vif->gso_mask) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                extra->u.gso.type = gso_type;
                extra->u.gso.size = skb_shinfo(skb)->gso_size;
                extra->u.gso.pad = 0;
                extra->u.gso.features = 0;
                extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (queue->vif->xdp_headroom) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

                memset(extra, 0, sizeof(struct xen_netif_extra_info));
                extra->u.xdp.headroom = queue->vif->xdp_headroom;
                extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                extra->u.hash.algorithm =
                        XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

                if (skb->l4_hash)
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
                else
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

                *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

                extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
                extra->flags = 0;

                pkt->extra_count++;
        }
}

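/* Every response for this packet is now in the ring; advance the
 * private producer and park the skb until the final copy flush.
 */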
static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;

        __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

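/* Advance the frag cursor, moving to the frag_list (and then along it)
 * once the current buffer's frags are exhausted.
 */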
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

        pkt->frag++;
        pkt->frag_offset = 0;

        if (pkt->frag >= nr_frags) {
                if (frag_iter == pkt->skb)
                        pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
                else
                        pkt->frag_iter = frag_iter->next;

                pkt->frag = -1;
        }
}

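/* Return the next chunk of packet data, clamped so that it crosses
 * neither a source page boundary nor the end of the destination ring
 * page.
 */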
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        void *frag_data;
        size_t frag_len, chunk_len;

        BUG_ON(!frag_iter);

        if (pkt->frag == -1) {
                frag_data = frag_iter->data;
                frag_len = skb_headlen(frag_iter);
        } else {
                skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

                frag_data = skb_frag_address(frag);
                frag_len = skb_frag_size(frag);
        }

        frag_data += pkt->frag_offset;
        frag_len -= pkt->frag_offset;

        chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
        chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                             xen_offset_in_page(frag_data));

        pkt->frag_offset += chunk_len;

        /* Advance to next frag? */
        if (frag_len == chunk_len)
                xenvif_rx_next_frag(pkt);

        *data = frag_data;
        *len = chunk_len;
}

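/* Fill one ring slot with packet data, starting after any XDP
 * headroom, and build the matching response. Only the first slot
 * carries the per-packet checksum and extra-info flags.
 */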
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
        unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;

        do {
                size_t len;
                void *data;

                xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
                xenvif_rx_copy_add(queue, req, offset, data, len);

                offset += len;
                pkt->remaining_len -= len;

        } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

        if (pkt->remaining_len > 0)
                flags = XEN_NETRXF_more_data;
        else
                flags = 0;

        if (pkt->slot == 0) {
                struct sk_buff *skb = pkt->skb;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flags |= XEN_NETRXF_csum_blank |
                                 XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        flags |= XEN_NETRXF_data_validated;

                if (pkt->extra_count != 0)
                        flags |= XEN_NETRXF_extra_info;
        }

        rsp->offset = 0;
        rsp->flags = flags;
        rsp->id = req->id;
        rsp->status = (s16)offset;
}

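/* Emit the next pending extra-info segment, setting the MORE flag
 * while further extras remain.
 */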
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
        struct xen_netif_extra_info *extra = (void *)rsp;
        unsigned int i;

        pkt->extra_count--;

        for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
                if (pkt->extras[i].type) {
                        *extra = pkt->extras[i];

                        if (pkt->extra_count != 0)
                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

                        pkt->extras[i].type = 0;
                        return;
                }
        }
        BUG();
}

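/* Transmit one queued skb to the frontend, consuming one ring slot per
 * page of data and per extra-info segment.
 */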
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
        struct xenvif_pkt_state pkt;

        xenvif_rx_next_skb(queue, &pkt);

        queue->last_rx_time = jiffies;

        do {
                struct xen_netif_rx_request *req;
                struct xen_netif_rx_response *rsp;

                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
                rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

                /* Extras must go after the first data slot */
                if (pkt.slot != 0 && pkt.extra_count != 0)
                        xenvif_rx_extra_slot(queue, &pkt, req, rsp);
                else
                        xenvif_rx_data_slot(queue, &pkt, req, rsp);

                queue->rx.req_cons++;
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

        xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

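/* Main receive loop: while ring slots are available and packets are
 * queued, transmit up to RX_BATCH_SIZE packets, then flush any
 * outstanding grant copies and free the completed skbs.
 */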
void xenvif_rx_action(struct xenvif_queue *queue)
{
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;

        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;

        while (xenvif_rx_ring_slots_available(queue) &&
               !skb_queue_empty(&queue->rx_queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }

        /* Flush any pending copies and complete all skbs. */
        xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return prod - cons;
}

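/* A queue stalls when the frontend has not posted enough rx requests
 * for the head packet within the stall timeout; it becomes ready again
 * once enough requests appear.
 */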
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return !queue->stalled &&
                xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

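/* Is there anything for the RX kthread to do? With test_kthread set,
 * a pending kthread stop request also counts as work.
 */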
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
                (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
        struct sk_buff *skb;
        long timeout;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
        return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
        DEFINE_WAIT(wait);

        if (xenvif_have_rx_work(queue, true))
                return;

        for (;;) {
                long ret;

                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
                if (xenvif_have_rx_work(queue, true))
                        break;
                if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                        &queue->eoi_pending) &
                    (NETBK_RX_EOI | NETBK_COMMON_EOI))
                        xen_irq_lateeoi(queue->rx_irq, 0);

                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
        }
        finish_wait(&queue->wq, &wait);
}

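/* Stall accounting: the carrier goes down when the first queue stalls
 * and comes back up only when every queue is ready again.
 */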
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* At least one queue has stalled? Disable the carrier. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                netdev_info(vif->dev, "Guest Rx stalled");
                netif_carrier_off(vif->dev);
        }
        spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
        queue->stalled = false;

        /* All queues are ready? Enable the carrier. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                netdev_info(vif->dev, "Guest Rx ready");
                netif_carrier_on(vif->dev);
        }
        spin_unlock(&vif->lock);
}

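/* Top-level loop of the per-queue guest RX kthread. */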
int xenvif_kthread_guest_rx(void *data)
{
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;

        if (!vif->stall_timeout)
                xenvif_queue_carrier_on(queue);

        for (;;) {
                xenvif_wait_for_rx_work(queue);

                if (kthread_should_stop())
                        break;

                /* This frontend is found to be rogue; disable it in
                 * kthread context. Currently this is only set when
                 * netback finds out the frontend sends malformed
                 * packets, but we cannot disable the interface in
                 * softirq context, so we defer it here, if this
                 * thread is associated with queue 0.
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
                        break;
                }

                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);

                /* If the guest hasn't provided any Rx slots for a
                 * while it's probably not responsive, drop the
                 * carrier so packets are dropped earlier.
                 */
                if (vif->stall_timeout) {
                        if (xenvif_rx_queue_stalled(queue))
                                xenvif_queue_carrier_off(queue);
                        else if (xenvif_rx_queue_ready(queue))
                                xenvif_queue_carrier_on(queue);
                }

                /* Queued packets may have foreign pages from other
                 * domains. These cannot be queued indefinitely as
                 * this would starve guests of grant refs and transmit
                 * slots.
                 */
                xenvif_rx_queue_drop_expired(queue);

                cond_resched();
        }

        /* Bin any remaining skbs */
        xenvif_rx_queue_purge(queue);

        return 0;
}