drivers/virtio/virtio_ring.c
fd534e9b 1// SPDX-License-Identifier: GPL-2.0-or-later
0a8a69dd
RR
2/* Virtio ring implementation.
3 *
4 * Copyright 2007 Rusty Russell IBM Corporation
0a8a69dd
RR
5 */
6#include <linux/virtio.h>
7#include <linux/virtio_ring.h>
e34f8725 8#include <linux/virtio_config.h>
0a8a69dd 9#include <linux/device.h>
5a0e3ad6 10#include <linux/slab.h>
b5a2c4f1 11#include <linux/module.h>
e93300b1 12#include <linux/hrtimer.h>
780bc790 13#include <linux/dma-mapping.h>
78fe3987 14#include <xen/xen.h>
0a8a69dd
RR
15
16#ifdef DEBUG
17/* For development, we want to crash whenever the ring is screwed. */
9499f5e7
RR
18#define BAD_RING(_vq, fmt, args...) \
19 do { \
20 dev_err(&(_vq)->vq.vdev->dev, \
21 "%s:"fmt, (_vq)->vq.name, ##args); \
22 BUG(); \
23 } while (0)
c5f841f1
RR
24/* Caller is supposed to guarantee no reentry. */
25#define START_USE(_vq) \
26 do { \
27 if ((_vq)->in_use) \
9499f5e7
RR
28 panic("%s:in_use = %i\n", \
29 (_vq)->vq.name, (_vq)->in_use); \
c5f841f1 30 (_vq)->in_use = __LINE__; \
9499f5e7 31 } while (0)
3a35ce7d 32#define END_USE(_vq) \
97a545ab 33 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
4d6a105e
TB
34#define LAST_ADD_TIME_UPDATE(_vq) \
35 do { \
36 ktime_t now = ktime_get(); \
37 \
38 /* No kick or get, with .1 second between? Warn. */ \
39 if ((_vq)->last_add_time_valid) \
40 WARN_ON(ktime_to_ms(ktime_sub(now, \
41 (_vq)->last_add_time)) > 100); \
42 (_vq)->last_add_time = now; \
43 (_vq)->last_add_time_valid = true; \
44 } while (0)
45#define LAST_ADD_TIME_CHECK(_vq) \
46 do { \
47 if ((_vq)->last_add_time_valid) { \
48 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
49 (_vq)->last_add_time)) > 100); \
50 } \
51 } while (0)
52#define LAST_ADD_TIME_INVALID(_vq) \
53 ((_vq)->last_add_time_valid = false)
0a8a69dd 54#else
9499f5e7
RR
55#define BAD_RING(_vq, fmt, args...) \
56 do { \
57 dev_err(&_vq->vq.vdev->dev, \
58 "%s:"fmt, (_vq)->vq.name, ##args); \
59 (_vq)->broken = true; \
60 } while (0)
0a8a69dd
RR
61#define START_USE(vq)
62#define END_USE(vq)
4d6a105e
TB
63#define LAST_ADD_TIME_UPDATE(vq)
64#define LAST_ADD_TIME_CHECK(vq)
65#define LAST_ADD_TIME_INVALID(vq)
0a8a69dd
RR
66#endif
67
cbeedb72 68struct vring_desc_state_split {
780bc790
AL
69 void *data; /* Data for callback. */
70 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
71};
72
1ce9e605
TB
73struct vring_desc_state_packed {
74 void *data; /* Data for callback. */
75 struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
76 u16 num; /* Descriptor list length. */
77 u16 next; /* The next desc state in a list. */
78 u16 last; /* The last desc state in a list. */
79};
80
81struct vring_desc_extra_packed {
82 dma_addr_t addr; /* Buffer DMA addr. */
83 u32 len; /* Buffer length. */
84 u16 flags; /* Descriptor flags. */
85};
86
43b4f721 87struct vring_virtqueue {
0a8a69dd
RR
88 struct virtqueue vq;
89
1ce9e605
TB
90 /* Is this a packed ring? */
91 bool packed_ring;
92
fb3fba6b
TB
93 /* Is DMA API used? */
94 bool use_dma_api;
95
7b21e34f
RR
96 /* Can we use weak barriers? */
97 bool weak_barriers;
98
0a8a69dd
RR
99 /* Other side has made a mess, don't try any more. */
100 bool broken;
101
9fa29b9d
MM
102 /* Host supports indirect buffers */
103 bool indirect;
104
a5c262c5
MT
105 /* Host publishes avail event idx */
106 bool event;
107
0a8a69dd
RR
108 /* Head of free buffer list. */
109 unsigned int free_head;
110 /* Number we've added since last sync. */
111 unsigned int num_added;
112
113 /* Last used index we've seen. */
1bc4953e 114 u16 last_used_idx;
0a8a69dd 115
8d622d21
MT
116 /* Hint for event idx: already triggered no need to disable. */
117 bool event_triggered;
118
1ce9e605
TB
119 union {
120 /* Available for split ring */
121 struct {
122 /* Actual memory layout for this queue. */
123 struct vring vring;
124
125 /* Last written value to avail->flags */
126 u16 avail_flags_shadow;
f277ec42 127
1ce9e605
TB
128 /*
129 * Last written value to avail->idx in
130 * guest byte order.
131 */
132 u16 avail_idx_shadow;
133
134 /* Per-descriptor state. */
135 struct vring_desc_state_split *desc_state;
136
137 /* DMA address and size information */
138 dma_addr_t queue_dma_addr;
139 size_t queue_size_in_bytes;
140 } split;
e593bf97 141
1ce9e605
TB
142 /* Available for packed ring */
143 struct {
144 /* Actual memory layout for this queue. */
9c0644ee
MT
145 struct {
146 unsigned int num;
147 struct vring_packed_desc *desc;
148 struct vring_packed_desc_event *driver;
149 struct vring_packed_desc_event *device;
150 } vring;
cbeedb72 151
1ce9e605
TB
152 /* Driver ring wrap counter. */
153 bool avail_wrap_counter;
d79dca75 154
1ce9e605
TB
155 /* Device ring wrap counter. */
156 bool used_wrap_counter;
157
158 /* Avail used flags. */
159 u16 avail_used_flags;
160
161 /* Index of the next avail descriptor. */
162 u16 next_avail_idx;
163
164 /*
165 * Last written value to driver->flags in
166 * guest byte order.
167 */
168 u16 event_flags_shadow;
169
170 /* Per-descriptor state. */
171 struct vring_desc_state_packed *desc_state;
172 struct vring_desc_extra_packed *desc_extra;
173
174 /* DMA address and size information */
175 dma_addr_t ring_dma_addr;
176 dma_addr_t driver_event_dma_addr;
177 dma_addr_t device_event_dma_addr;
178 size_t ring_size_in_bytes;
179 size_t event_size_in_bytes;
180 } packed;
181 };
f277ec42 182
0a8a69dd 183 /* How to notify other side. FIXME: commonalize hcalls! */
46f9c2b9 184 bool (*notify)(struct virtqueue *vq);
0a8a69dd 185
2a2d1382
AL
186 /* DMA, allocation, and size information */
187 bool we_own_ring;
2a2d1382 188
0a8a69dd
RR
189#ifdef DEBUG
190 /* They're supposed to lock for us. */
191 unsigned int in_use;
e93300b1
RR
192
193 /* Figure out if their kicks are too delayed. */
194 bool last_add_time_valid;
195 ktime_t last_add_time;
0a8a69dd 196#endif
0a8a69dd
RR
197};
198
e6f633e5
TB
199
200/*
201 * Helpers.
202 */
203
0a8a69dd
RR
204#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
205
2f18c2d1
TB
206static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
207 unsigned int total_sg)
208{
209 struct vring_virtqueue *vq = to_vvq(_vq);
210
211 /*
212 * If the host supports indirect descriptor tables, and we have multiple
213 * buffers, then go indirect. FIXME: tune this threshold
214 */
215 return (vq->indirect && total_sg > 1 && vq->vq.num_free);
216}
217
d26c96c8 218/*
1a937693
MT
219 * Modern virtio devices have feature bits to specify whether they need a
220 * quirk and bypass the IOMMU. If not there, just use the DMA API.
221 *
222 * If there, the interaction between virtio and DMA API is messy.
d26c96c8
AL
223 *
224 * On most systems with virtio, physical addresses match bus addresses,
225 * and it doesn't particularly matter whether we use the DMA API.
226 *
227 * On some systems, including Xen and any system with a physical device
228 * that speaks virtio behind a physical IOMMU, we must use the DMA API
229 * for virtio DMA to work at all.
230 *
231 * On other systems, including SPARC and PPC64, virtio-pci devices are
232 * enumerated as though they are behind an IOMMU, but the virtio host
233 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
234 * there or somehow map everything as the identity.
235 *
236 * For the time being, we preserve historic behavior and bypass the DMA
237 * API.
1a937693
MT
238 *
239 * TODO: install a per-device DMA ops structure that does the right thing
240 * taking into account all the above quirks, and use the DMA API
241 * unconditionally on data path.
d26c96c8
AL
242 */
243
244static bool vring_use_dma_api(struct virtio_device *vdev)
245{
24b6842a 246 if (!virtio_has_dma_quirk(vdev))
1a937693
MT
247 return true;
248
249 /* Otherwise, we are left to guess. */
78fe3987
AL
250 /*
 251 * In theory, it's possible to have a buggy QEMU-supplied
252 * emulated Q35 IOMMU and Xen enabled at the same time. On
253 * such a configuration, virtio has never worked and will
254 * not work without an even larger kludge. Instead, enable
255 * the DMA API if we're a Xen guest, which at least allows
256 * all of the sensible Xen configurations to work correctly.
257 */
258 if (xen_domain())
259 return true;
260
d26c96c8
AL
261 return false;
262}
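/*
 * Informal summary of the checks above (added for clarity, not upstream
 * documentation):
 *
 *	!virtio_has_dma_quirk(vdev)	-> device wants the DMA API, use it
 *	xen_domain()			-> use the DMA API so Xen setups work
 *	otherwise			-> historic behaviour, bypass the DMA API
 */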
263
e6d6dd6c
JR
264size_t virtio_max_dma_size(struct virtio_device *vdev)
265{
266 size_t max_segment_size = SIZE_MAX;
267
268 if (vring_use_dma_api(vdev))
269 max_segment_size = dma_max_mapping_size(&vdev->dev);
270
271 return max_segment_size;
272}
273EXPORT_SYMBOL_GPL(virtio_max_dma_size);
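/*
 * Example (illustrative sketch only, not part of this file): a block-style
 * driver might cap its scatterlist segment size with the value returned
 * here, e.g.:
 *
 *	u32 max_seg = min_t(u32, virtio_max_dma_size(vdev), U32_MAX);
 *	blk_queue_max_segment_size(q, max_seg);
 *
 * where "q" is the driver's request queue; whether such a cap is needed
 * depends on the driver.
 */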
274
d79dca75
TB
275static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
276 dma_addr_t *dma_handle, gfp_t flag)
277{
278 if (vring_use_dma_api(vdev)) {
279 return dma_alloc_coherent(vdev->dev.parent, size,
280 dma_handle, flag);
281 } else {
282 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
283
284 if (queue) {
285 phys_addr_t phys_addr = virt_to_phys(queue);
286 *dma_handle = (dma_addr_t)phys_addr;
287
288 /*
 289 * Sanity check: make sure we didn't truncate
290 * the address. The only arches I can find that
291 * have 64-bit phys_addr_t but 32-bit dma_addr_t
292 * are certain non-highmem MIPS and x86
293 * configurations, but these configurations
294 * should never allocate physical pages above 32
295 * bits, so this is fine. Just in case, throw a
296 * warning and abort if we end up with an
297 * unrepresentable address.
298 */
299 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
300 free_pages_exact(queue, PAGE_ALIGN(size));
301 return NULL;
302 }
303 }
304 return queue;
305 }
306}
307
308static void vring_free_queue(struct virtio_device *vdev, size_t size,
309 void *queue, dma_addr_t dma_handle)
310{
311 if (vring_use_dma_api(vdev))
312 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
313 else
314 free_pages_exact(queue, PAGE_ALIGN(size));
315}
316
780bc790
AL
317/*
318 * The DMA ops on various arches are rather gnarly right now, and
319 * making all of the arch DMA ops work on the vring device itself
320 * is a mess. For now, we use the parent device for DMA ops.
321 */
75bfa81b 322static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
780bc790
AL
323{
324 return vq->vq.vdev->dev.parent;
325}
326
327/* Map one sg entry. */
328static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
329 struct scatterlist *sg,
330 enum dma_data_direction direction)
331{
fb3fba6b 332 if (!vq->use_dma_api)
780bc790
AL
333 return (dma_addr_t)sg_phys(sg);
334
335 /*
336 * We can't use dma_map_sg, because we don't use scatterlists in
337 * the way it expects (we don't guarantee that the scatterlist
338 * will exist for the lifetime of the mapping).
339 */
340 return dma_map_page(vring_dma_dev(vq),
341 sg_page(sg), sg->offset, sg->length,
342 direction);
343}
344
345static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
346 void *cpu_addr, size_t size,
347 enum dma_data_direction direction)
348{
fb3fba6b 349 if (!vq->use_dma_api)
780bc790
AL
350 return (dma_addr_t)virt_to_phys(cpu_addr);
351
352 return dma_map_single(vring_dma_dev(vq),
353 cpu_addr, size, direction);
354}
355
e6f633e5
TB
356static int vring_mapping_error(const struct vring_virtqueue *vq,
357 dma_addr_t addr)
358{
fb3fba6b 359 if (!vq->use_dma_api)
e6f633e5
TB
360 return 0;
361
362 return dma_mapping_error(vring_dma_dev(vq), addr);
363}
364
365
366/*
367 * Split ring specific functions - *_split().
368 */
369
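/*
 * Layout reminder (added for clarity): a split virtqueue consists of a
 * descriptor table, an "avail" ring written by the driver and a "used"
 * ring written by the device; free descriptors are chained through
 * desc[i].next starting at vq->free_head.
 */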
138fd251
TB
370static void vring_unmap_one_split(const struct vring_virtqueue *vq,
371 struct vring_desc *desc)
780bc790
AL
372{
373 u16 flags;
374
fb3fba6b 375 if (!vq->use_dma_api)
780bc790
AL
376 return;
377
378 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
379
380 if (flags & VRING_DESC_F_INDIRECT) {
381 dma_unmap_single(vring_dma_dev(vq),
382 virtio64_to_cpu(vq->vq.vdev, desc->addr),
383 virtio32_to_cpu(vq->vq.vdev, desc->len),
384 (flags & VRING_DESC_F_WRITE) ?
385 DMA_FROM_DEVICE : DMA_TO_DEVICE);
386 } else {
387 dma_unmap_page(vring_dma_dev(vq),
388 virtio64_to_cpu(vq->vq.vdev, desc->addr),
389 virtio32_to_cpu(vq->vq.vdev, desc->len),
390 (flags & VRING_DESC_F_WRITE) ?
391 DMA_FROM_DEVICE : DMA_TO_DEVICE);
392 }
393}
394
138fd251
TB
395static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
396 unsigned int total_sg,
397 gfp_t gfp)
9fa29b9d
MM
398{
399 struct vring_desc *desc;
b25bd251 400 unsigned int i;
9fa29b9d 401
b92b1b89
WD
402 /*
403 * We require lowmem mappings for the descriptors because
404 * otherwise virt_to_phys will give us bogus addresses in the
405 * virtqueue.
406 */
82107539 407 gfp &= ~__GFP_HIGHMEM;
b92b1b89 408
6da2ec56 409 desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
9fa29b9d 410 if (!desc)
b25bd251 411 return NULL;
9fa29b9d 412
b25bd251 413 for (i = 0; i < total_sg; i++)
00e6f3d9 414 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
b25bd251 415 return desc;
9fa29b9d
MM
416}
417
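/*
 * Summary (added for clarity): virtqueue_add_split() chains the caller's
 * scatterlists through the descriptor table, or through a separately
 * allocated indirect table when virtqueue_use_indirect() says so, maps
 * each buffer for DMA, and finally publishes the head descriptor index
 * in the avail ring so the device can see the new buffer.
 */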
138fd251
TB
418static inline int virtqueue_add_split(struct virtqueue *_vq,
419 struct scatterlist *sgs[],
420 unsigned int total_sg,
421 unsigned int out_sgs,
422 unsigned int in_sgs,
423 void *data,
424 void *ctx,
425 gfp_t gfp)
0a8a69dd
RR
426{
427 struct vring_virtqueue *vq = to_vvq(_vq);
13816c76 428 struct scatterlist *sg;
b25bd251 429 struct vring_desc *desc;
3f649ab7 430 unsigned int i, n, avail, descs_used, prev, err_idx;
1fe9b6fe 431 int head;
b25bd251 432 bool indirect;
0a8a69dd 433
9fa29b9d
MM
434 START_USE(vq);
435
0a8a69dd 436 BUG_ON(data == NULL);
5a08b04f 437 BUG_ON(ctx && vq->indirect);
9fa29b9d 438
70670444
RR
439 if (unlikely(vq->broken)) {
440 END_USE(vq);
441 return -EIO;
442 }
443
4d6a105e 444 LAST_ADD_TIME_UPDATE(vq);
e93300b1 445
b25bd251
RR
446 BUG_ON(total_sg == 0);
447
448 head = vq->free_head;
449
2f18c2d1 450 if (virtqueue_use_indirect(_vq, total_sg))
138fd251 451 desc = alloc_indirect_split(_vq, total_sg, gfp);
44ed8089 452 else {
b25bd251 453 desc = NULL;
e593bf97 454 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
44ed8089 455 }
b25bd251
RR
456
457 if (desc) {
458 /* Use a single buffer which doesn't continue */
780bc790 459 indirect = true;
b25bd251
RR
460 /* Set up rest to use this indirect table. */
461 i = 0;
462 descs_used = 1;
b25bd251 463 } else {
780bc790 464 indirect = false;
e593bf97 465 desc = vq->split.vring.desc;
b25bd251
RR
466 i = head;
467 descs_used = total_sg;
9fa29b9d
MM
468 }
469
b25bd251 470 if (vq->vq.num_free < descs_used) {
0a8a69dd 471 pr_debug("Can't add buf len %i - avail = %i\n",
b25bd251 472 descs_used, vq->vq.num_free);
44653eae
RR
473 /* FIXME: for historical reasons, we force a notify here if
474 * there are outgoing parts to the buffer. Presumably the
475 * host should service the ring ASAP. */
13816c76 476 if (out_sgs)
44653eae 477 vq->notify(&vq->vq);
58625edf
WY
478 if (indirect)
479 kfree(desc);
0a8a69dd
RR
480 END_USE(vq);
481 return -ENOSPC;
482 }
483
13816c76 484 for (n = 0; n < out_sgs; n++) {
eeebf9b1 485 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
780bc790
AL
486 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
487 if (vring_mapping_error(vq, addr))
488 goto unmap_release;
489
00e6f3d9 490 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
780bc790 491 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
00e6f3d9 492 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
13816c76 493 prev = i;
00e6f3d9 494 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
13816c76 495 }
0a8a69dd 496 }
13816c76 497 for (; n < (out_sgs + in_sgs); n++) {
eeebf9b1 498 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
780bc790
AL
499 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
500 if (vring_mapping_error(vq, addr))
501 goto unmap_release;
502
00e6f3d9 503 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
780bc790 504 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
00e6f3d9 505 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
13816c76 506 prev = i;
00e6f3d9 507 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
13816c76 508 }
0a8a69dd
RR
509 }
510 /* Last one doesn't continue. */
00e6f3d9 511 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
0a8a69dd 512
780bc790
AL
513 if (indirect) {
514 /* Now that the indirect table is filled in, map it. */
515 dma_addr_t addr = vring_map_single(
516 vq, desc, total_sg * sizeof(struct vring_desc),
517 DMA_TO_DEVICE);
518 if (vring_mapping_error(vq, addr))
519 goto unmap_release;
520
e593bf97
TB
521 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
522 VRING_DESC_F_INDIRECT);
523 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
524 addr);
780bc790 525
e593bf97
TB
526 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
527 total_sg * sizeof(struct vring_desc));
780bc790
AL
528 }
529
530 /* We're using some buffers from the free list. */
531 vq->vq.num_free -= descs_used;
532
0a8a69dd 533 /* Update free pointer */
b25bd251 534 if (indirect)
e593bf97
TB
535 vq->free_head = virtio16_to_cpu(_vq->vdev,
536 vq->split.vring.desc[head].next);
b25bd251
RR
537 else
538 vq->free_head = i;
0a8a69dd 539
780bc790 540 /* Store token and indirect buffer state. */
cbeedb72 541 vq->split.desc_state[head].data = data;
780bc790 542 if (indirect)
cbeedb72 543 vq->split.desc_state[head].indir_desc = desc;
87646a34 544 else
cbeedb72 545 vq->split.desc_state[head].indir_desc = ctx;
0a8a69dd
RR
546
547 /* Put entry in available array (but don't update avail->idx until they
3b720b8c 548 * do sync). */
e593bf97
TB
549 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
550 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
0a8a69dd 551
ee7cd898
RR
552 /* Descriptors and available array need to be set before we expose the
553 * new available array entries. */
a9a0fef7 554 virtio_wmb(vq->weak_barriers);
e593bf97
TB
555 vq->split.avail_idx_shadow++;
556 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
557 vq->split.avail_idx_shadow);
ee7cd898
RR
558 vq->num_added++;
559
5e05bf58
TH
560 pr_debug("Added buffer head %i to %p\n", head, vq);
561 END_USE(vq);
562
ee7cd898
RR
563 /* This is very unlikely, but theoretically possible. Kick
564 * just in case. */
565 if (unlikely(vq->num_added == (1 << 16) - 1))
566 virtqueue_kick(_vq);
567
98e8c6bc 568 return 0;
780bc790
AL
569
570unmap_release:
571 err_idx = i;
cf8f1696
ML
572
573 if (indirect)
574 i = 0;
575 else
576 i = head;
780bc790
AL
577
578 for (n = 0; n < total_sg; n++) {
579 if (i == err_idx)
580 break;
138fd251 581 vring_unmap_one_split(vq, &desc[i]);
cf8f1696 582 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
780bc790
AL
583 }
584
780bc790
AL
585 if (indirect)
586 kfree(desc);
587
3cc36f6e 588 END_USE(vq);
f7728002 589 return -ENOMEM;
0a8a69dd 590}
13816c76 591
138fd251 592static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
0a8a69dd
RR
593{
594 struct vring_virtqueue *vq = to_vvq(_vq);
a5c262c5 595 u16 new, old;
41f0377f
RR
596 bool needs_kick;
597
0a8a69dd 598 START_USE(vq);
a72caae2
JW
599 /* We need to expose available array entries before checking avail
600 * event. */
a9a0fef7 601 virtio_mb(vq->weak_barriers);
0a8a69dd 602
e593bf97
TB
603 old = vq->split.avail_idx_shadow - vq->num_added;
604 new = vq->split.avail_idx_shadow;
0a8a69dd
RR
605 vq->num_added = 0;
606
4d6a105e
TB
607 LAST_ADD_TIME_CHECK(vq);
608 LAST_ADD_TIME_INVALID(vq);
e93300b1 609
41f0377f 610 if (vq->event) {
e593bf97
TB
611 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
612 vring_avail_event(&vq->split.vring)),
41f0377f
RR
613 new, old);
614 } else {
e593bf97
TB
615 needs_kick = !(vq->split.vring.used->flags &
616 cpu_to_virtio16(_vq->vdev,
617 VRING_USED_F_NO_NOTIFY));
41f0377f 618 }
0a8a69dd 619 END_USE(vq);
41f0377f
RR
620 return needs_kick;
621}
138fd251 622
138fd251
TB
623static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
624 void **ctx)
0a8a69dd 625{
780bc790 626 unsigned int i, j;
c60923cb 627 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
0a8a69dd
RR
628
629 /* Clear data ptr. */
cbeedb72 630 vq->split.desc_state[head].data = NULL;
0a8a69dd 631
780bc790 632 /* Put back on free list: unmap first-level descriptors and find end */
0a8a69dd 633 i = head;
9fa29b9d 634
e593bf97
TB
635 while (vq->split.vring.desc[i].flags & nextflag) {
636 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
637 i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
06ca287d 638 vq->vq.num_free++;
0a8a69dd
RR
639 }
640
e593bf97
TB
641 vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
642 vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
643 vq->free_head);
0a8a69dd 644 vq->free_head = head;
780bc790 645
0a8a69dd 646 /* Plus final descriptor */
06ca287d 647 vq->vq.num_free++;
780bc790 648
5a08b04f 649 if (vq->indirect) {
cbeedb72
TB
650 struct vring_desc *indir_desc =
651 vq->split.desc_state[head].indir_desc;
5a08b04f
MT
652 u32 len;
653
654 /* Free the indirect table, if any, now that it's unmapped. */
655 if (!indir_desc)
656 return;
657
e593bf97
TB
658 len = virtio32_to_cpu(vq->vq.vdev,
659 vq->split.vring.desc[head].len);
780bc790 660
e593bf97 661 BUG_ON(!(vq->split.vring.desc[head].flags &
780bc790
AL
662 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
663 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
664
665 for (j = 0; j < len / sizeof(struct vring_desc); j++)
138fd251 666 vring_unmap_one_split(vq, &indir_desc[j]);
780bc790 667
5a08b04f 668 kfree(indir_desc);
cbeedb72 669 vq->split.desc_state[head].indir_desc = NULL;
5a08b04f 670 } else if (ctx) {
cbeedb72 671 *ctx = vq->split.desc_state[head].indir_desc;
780bc790 672 }
0a8a69dd
RR
673}
674
138fd251 675static inline bool more_used_split(const struct vring_virtqueue *vq)
0a8a69dd 676{
e593bf97
TB
677 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
678 vq->split.vring.used->idx);
0a8a69dd
RR
679}
680
138fd251
TB
681static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
682 unsigned int *len,
683 void **ctx)
0a8a69dd
RR
684{
685 struct vring_virtqueue *vq = to_vvq(_vq);
686 void *ret;
687 unsigned int i;
3b720b8c 688 u16 last_used;
0a8a69dd
RR
689
690 START_USE(vq);
691
5ef82752
RR
692 if (unlikely(vq->broken)) {
693 END_USE(vq);
694 return NULL;
695 }
696
138fd251 697 if (!more_used_split(vq)) {
0a8a69dd
RR
698 pr_debug("No more buffers in queue\n");
699 END_USE(vq);
700 return NULL;
701 }
702
2d61ba95 703 /* Only get used array entries after they have been exposed by host. */
a9a0fef7 704 virtio_rmb(vq->weak_barriers);
2d61ba95 705
e593bf97
TB
706 last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
707 i = virtio32_to_cpu(_vq->vdev,
708 vq->split.vring.used->ring[last_used].id);
709 *len = virtio32_to_cpu(_vq->vdev,
710 vq->split.vring.used->ring[last_used].len);
0a8a69dd 711
e593bf97 712 if (unlikely(i >= vq->split.vring.num)) {
0a8a69dd
RR
713 BAD_RING(vq, "id %u out of range\n", i);
714 return NULL;
715 }
cbeedb72 716 if (unlikely(!vq->split.desc_state[i].data)) {
0a8a69dd
RR
717 BAD_RING(vq, "id %u is not a head!\n", i);
718 return NULL;
719 }
720
138fd251 721 /* detach_buf_split clears data, so grab it now. */
cbeedb72 722 ret = vq->split.desc_state[i].data;
138fd251 723 detach_buf_split(vq, i, ctx);
0a8a69dd 724 vq->last_used_idx++;
a5c262c5
MT
725 /* If we expect an interrupt for the next entry, tell host
726 * by writing event index and flush out the write before
727 * the read in the next get_buf call. */
e593bf97 728 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
788e5b3a 729 virtio_store_mb(vq->weak_barriers,
e593bf97 730 &vring_used_event(&vq->split.vring),
788e5b3a 731 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
a5c262c5 732
4d6a105e 733 LAST_ADD_TIME_INVALID(vq);
e93300b1 734
0a8a69dd
RR
735 END_USE(vq);
736 return ret;
737}
138fd251 738
138fd251 739static void virtqueue_disable_cb_split(struct virtqueue *_vq)
18445c4d
RR
740{
741 struct vring_virtqueue *vq = to_vvq(_vq);
742
e593bf97
TB
743 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
744 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
8d622d21
MT
745 if (vq->event)
746 /* TODO: this is a hack. Figure out a cleaner value to write. */
747 vring_used_event(&vq->split.vring) = 0x0;
748 else
e593bf97
TB
749 vq->split.vring.avail->flags =
750 cpu_to_virtio16(_vq->vdev,
751 vq->split.avail_flags_shadow);
f277ec42 752 }
18445c4d
RR
753}
754
138fd251 755static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
0a8a69dd
RR
756{
757 struct vring_virtqueue *vq = to_vvq(_vq);
cc229884 758 u16 last_used_idx;
0a8a69dd
RR
759
760 START_USE(vq);
0a8a69dd
RR
761
762 /* We optimistically turn back on interrupts, then check if there was
763 * more to do. */
a5c262c5
MT
764 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
765 * either clear the flags bit or point the event index at the next
766 * entry. Always do both to keep code simple. */
e593bf97
TB
767 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
768 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
0ea1e4a6 769 if (!vq->event)
e593bf97
TB
770 vq->split.vring.avail->flags =
771 cpu_to_virtio16(_vq->vdev,
772 vq->split.avail_flags_shadow);
f277ec42 773 }
e593bf97
TB
774 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
775 last_used_idx = vq->last_used_idx);
cc229884
MT
776 END_USE(vq);
777 return last_used_idx;
778}
138fd251 779
138fd251
TB
780static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
781{
782 struct vring_virtqueue *vq = to_vvq(_vq);
783
784 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
e593bf97 785 vq->split.vring.used->idx);
138fd251
TB
786}
787
138fd251 788static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
7ab358c2
MT
789{
790 struct vring_virtqueue *vq = to_vvq(_vq);
791 u16 bufs;
792
793 START_USE(vq);
794
795 /* We optimistically turn back on interrupts, then check if there was
796 * more to do. */
797 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
798 * either clear the flags bit or point the event index at the next
0ea1e4a6 799 * entry. Always update the event index to keep code simple. */
e593bf97
TB
800 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
801 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
0ea1e4a6 802 if (!vq->event)
e593bf97
TB
803 vq->split.vring.avail->flags =
804 cpu_to_virtio16(_vq->vdev,
805 vq->split.avail_flags_shadow);
f277ec42 806 }
7ab358c2 807 /* TODO: tune this threshold */
e593bf97 808 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
788e5b3a
MT
809
810 virtio_store_mb(vq->weak_barriers,
e593bf97 811 &vring_used_event(&vq->split.vring),
788e5b3a
MT
812 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
813
e593bf97
TB
814 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
815 - vq->last_used_idx) > bufs)) {
7ab358c2
MT
816 END_USE(vq);
817 return false;
818 }
819
820 END_USE(vq);
821 return true;
822}
7ab358c2 823
138fd251 824static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
c021eac4
SM
825{
826 struct vring_virtqueue *vq = to_vvq(_vq);
827 unsigned int i;
828 void *buf;
829
830 START_USE(vq);
831
e593bf97 832 for (i = 0; i < vq->split.vring.num; i++) {
cbeedb72 833 if (!vq->split.desc_state[i].data)
c021eac4 834 continue;
138fd251 835 /* detach_buf_split clears data, so grab it now. */
cbeedb72 836 buf = vq->split.desc_state[i].data;
138fd251 837 detach_buf_split(vq, i, NULL);
e593bf97
TB
838 vq->split.avail_idx_shadow--;
839 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
840 vq->split.avail_idx_shadow);
c021eac4
SM
841 END_USE(vq);
842 return buf;
843 }
844 /* That should have freed everything. */
e593bf97 845 BUG_ON(vq->vq.num_free != vq->split.vring.num);
c021eac4
SM
846
847 END_USE(vq);
848 return NULL;
849}
138fd251 850
d79dca75
TB
851static struct virtqueue *vring_create_virtqueue_split(
852 unsigned int index,
853 unsigned int num,
854 unsigned int vring_align,
855 struct virtio_device *vdev,
856 bool weak_barriers,
857 bool may_reduce_num,
858 bool context,
859 bool (*notify)(struct virtqueue *),
860 void (*callback)(struct virtqueue *),
861 const char *name)
862{
863 struct virtqueue *vq;
864 void *queue = NULL;
865 dma_addr_t dma_addr;
866 size_t queue_size_in_bytes;
867 struct vring vring;
868
869 /* We assume num is a power of 2. */
870 if (num & (num - 1)) {
871 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
872 return NULL;
873 }
874
875 /* TODO: allocate each queue chunk individually */
876 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
877 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
878 &dma_addr,
879 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
880 if (queue)
881 break;
cf94db21
CH
882 if (!may_reduce_num)
883 return NULL;
d79dca75
TB
884 }
885
886 if (!num)
887 return NULL;
888
889 if (!queue) {
890 /* Try to get a single page. You are my only hope! */
891 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
892 &dma_addr, GFP_KERNEL|__GFP_ZERO);
893 }
894 if (!queue)
895 return NULL;
896
897 queue_size_in_bytes = vring_size(num, vring_align);
898 vring_init(&vring, num, queue, vring_align);
899
900 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
901 notify, callback, name);
902 if (!vq) {
903 vring_free_queue(vdev, queue_size_in_bytes, queue,
904 dma_addr);
905 return NULL;
906 }
907
908 to_vvq(vq)->split.queue_dma_addr = dma_addr;
909 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
910 to_vvq(vq)->we_own_ring = true;
911
912 return vq;
913}
914
e6f633e5 915
1ce9e605
TB
916/*
917 * Packed ring specific functions - *_packed().
918 */
919
920static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
921 struct vring_desc_extra_packed *state)
922{
923 u16 flags;
924
925 if (!vq->use_dma_api)
926 return;
927
928 flags = state->flags;
929
930 if (flags & VRING_DESC_F_INDIRECT) {
931 dma_unmap_single(vring_dma_dev(vq),
932 state->addr, state->len,
933 (flags & VRING_DESC_F_WRITE) ?
934 DMA_FROM_DEVICE : DMA_TO_DEVICE);
935 } else {
936 dma_unmap_page(vring_dma_dev(vq),
937 state->addr, state->len,
938 (flags & VRING_DESC_F_WRITE) ?
939 DMA_FROM_DEVICE : DMA_TO_DEVICE);
940 }
941}
942
943static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
944 struct vring_packed_desc *desc)
945{
946 u16 flags;
947
948 if (!vq->use_dma_api)
949 return;
950
951 flags = le16_to_cpu(desc->flags);
952
953 if (flags & VRING_DESC_F_INDIRECT) {
954 dma_unmap_single(vring_dma_dev(vq),
955 le64_to_cpu(desc->addr),
956 le32_to_cpu(desc->len),
957 (flags & VRING_DESC_F_WRITE) ?
958 DMA_FROM_DEVICE : DMA_TO_DEVICE);
959 } else {
960 dma_unmap_page(vring_dma_dev(vq),
961 le64_to_cpu(desc->addr),
962 le32_to_cpu(desc->len),
963 (flags & VRING_DESC_F_WRITE) ?
964 DMA_FROM_DEVICE : DMA_TO_DEVICE);
965 }
966}
967
968static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
969 gfp_t gfp)
970{
971 struct vring_packed_desc *desc;
972
973 /*
974 * We require lowmem mappings for the descriptors because
975 * otherwise virt_to_phys will give us bogus addresses in the
976 * virtqueue.
977 */
978 gfp &= ~__GFP_HIGHMEM;
979
980 desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
981
982 return desc;
983}
984
985static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
986 struct scatterlist *sgs[],
987 unsigned int total_sg,
988 unsigned int out_sgs,
989 unsigned int in_sgs,
990 void *data,
991 gfp_t gfp)
992{
993 struct vring_packed_desc *desc;
994 struct scatterlist *sg;
995 unsigned int i, n, err_idx;
996 u16 head, id;
997 dma_addr_t addr;
998
999 head = vq->packed.next_avail_idx;
1000 desc = alloc_indirect_packed(total_sg, gfp);
1001
1002 if (unlikely(vq->vq.num_free < 1)) {
1003 pr_debug("Can't add buf len 1 - avail = 0\n");
df0bfe75 1004 kfree(desc);
1ce9e605
TB
1005 END_USE(vq);
1006 return -ENOSPC;
1007 }
1008
1009 i = 0;
1010 id = vq->free_head;
1011 BUG_ON(id == vq->packed.vring.num);
1012
1013 for (n = 0; n < out_sgs + in_sgs; n++) {
1014 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1015 addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1016 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1017 if (vring_mapping_error(vq, addr))
1018 goto unmap_release;
1019
1020 desc[i].flags = cpu_to_le16(n < out_sgs ?
1021 0 : VRING_DESC_F_WRITE);
1022 desc[i].addr = cpu_to_le64(addr);
1023 desc[i].len = cpu_to_le32(sg->length);
1024 i++;
1025 }
1026 }
1027
1028 /* Now that the indirect table is filled in, map it. */
1029 addr = vring_map_single(vq, desc,
1030 total_sg * sizeof(struct vring_packed_desc),
1031 DMA_TO_DEVICE);
1032 if (vring_mapping_error(vq, addr))
1033 goto unmap_release;
1034
1035 vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
1036 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
1037 sizeof(struct vring_packed_desc));
1038 vq->packed.vring.desc[head].id = cpu_to_le16(id);
1039
1040 if (vq->use_dma_api) {
1041 vq->packed.desc_extra[id].addr = addr;
1042 vq->packed.desc_extra[id].len = total_sg *
1043 sizeof(struct vring_packed_desc);
1044 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
1045 vq->packed.avail_used_flags;
1046 }
1047
1048 /*
1049 * A driver MUST NOT make the first descriptor in the list
1050 * available before all subsequent descriptors comprising
1051 * the list are made available.
1052 */
1053 virtio_wmb(vq->weak_barriers);
1054 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
1055 vq->packed.avail_used_flags);
1056
1057 /* We're using some buffers from the free list. */
1058 vq->vq.num_free -= 1;
1059
1060 /* Update free pointer */
1061 n = head + 1;
1062 if (n >= vq->packed.vring.num) {
1063 n = 0;
1064 vq->packed.avail_wrap_counter ^= 1;
1065 vq->packed.avail_used_flags ^=
1066 1 << VRING_PACKED_DESC_F_AVAIL |
1067 1 << VRING_PACKED_DESC_F_USED;
1068 }
1069 vq->packed.next_avail_idx = n;
1070 vq->free_head = vq->packed.desc_state[id].next;
1071
1072 /* Store token and indirect buffer state. */
1073 vq->packed.desc_state[id].num = 1;
1074 vq->packed.desc_state[id].data = data;
1075 vq->packed.desc_state[id].indir_desc = desc;
1076 vq->packed.desc_state[id].last = id;
1077
1078 vq->num_added += 1;
1079
1080 pr_debug("Added buffer head %i to %p\n", head, vq);
1081 END_USE(vq);
1082
1083 return 0;
1084
1085unmap_release:
1086 err_idx = i;
1087
1088 for (i = 0; i < err_idx; i++)
1089 vring_unmap_desc_packed(vq, &desc[i]);
1090
1091 kfree(desc);
1092
1093 END_USE(vq);
f7728002 1094 return -ENOMEM;
1ce9e605
TB
1095}
1096
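/*
 * Summary (added for clarity): the packed ring has no separate avail and
 * used rings; virtqueue_add_packed() writes descriptors in place and
 * toggles their AVAIL/USED flag bits according to avail_wrap_counter.
 * The head descriptor's flags are written last, after a write barrier,
 * so the device never observes a partially written chain.
 */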
1097static inline int virtqueue_add_packed(struct virtqueue *_vq,
1098 struct scatterlist *sgs[],
1099 unsigned int total_sg,
1100 unsigned int out_sgs,
1101 unsigned int in_sgs,
1102 void *data,
1103 void *ctx,
1104 gfp_t gfp)
1105{
1106 struct vring_virtqueue *vq = to_vvq(_vq);
1107 struct vring_packed_desc *desc;
1108 struct scatterlist *sg;
1109 unsigned int i, n, c, descs_used, err_idx;
3f649ab7
KC
1110 __le16 head_flags, flags;
1111 u16 head, id, prev, curr, avail_used_flags;
1ce9e605
TB
1112
1113 START_USE(vq);
1114
1115 BUG_ON(data == NULL);
1116 BUG_ON(ctx && vq->indirect);
1117
1118 if (unlikely(vq->broken)) {
1119 END_USE(vq);
1120 return -EIO;
1121 }
1122
1123 LAST_ADD_TIME_UPDATE(vq);
1124
1125 BUG_ON(total_sg == 0);
1126
1127 if (virtqueue_use_indirect(_vq, total_sg))
1128 return virtqueue_add_indirect_packed(vq, sgs, total_sg,
1129 out_sgs, in_sgs, data, gfp);
1130
1131 head = vq->packed.next_avail_idx;
1132 avail_used_flags = vq->packed.avail_used_flags;
1133
1134 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
1135
1136 desc = vq->packed.vring.desc;
1137 i = head;
1138 descs_used = total_sg;
1139
1140 if (unlikely(vq->vq.num_free < descs_used)) {
1141 pr_debug("Can't add buf len %i - avail = %i\n",
1142 descs_used, vq->vq.num_free);
1143 END_USE(vq);
1144 return -ENOSPC;
1145 }
1146
1147 id = vq->free_head;
1148 BUG_ON(id == vq->packed.vring.num);
1149
1150 curr = id;
1151 c = 0;
1152 for (n = 0; n < out_sgs + in_sgs; n++) {
1153 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
1154 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
1155 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1156 if (vring_mapping_error(vq, addr))
1157 goto unmap_release;
1158
1159 flags = cpu_to_le16(vq->packed.avail_used_flags |
1160 (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
1161 (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
1162 if (i == head)
1163 head_flags = flags;
1164 else
1165 desc[i].flags = flags;
1166
1167 desc[i].addr = cpu_to_le64(addr);
1168 desc[i].len = cpu_to_le32(sg->length);
1169 desc[i].id = cpu_to_le16(id);
1170
1171 if (unlikely(vq->use_dma_api)) {
1172 vq->packed.desc_extra[curr].addr = addr;
1173 vq->packed.desc_extra[curr].len = sg->length;
1174 vq->packed.desc_extra[curr].flags =
1175 le16_to_cpu(flags);
1176 }
1177 prev = curr;
1178 curr = vq->packed.desc_state[curr].next;
1179
1180 if ((unlikely(++i >= vq->packed.vring.num))) {
1181 i = 0;
1182 vq->packed.avail_used_flags ^=
1183 1 << VRING_PACKED_DESC_F_AVAIL |
1184 1 << VRING_PACKED_DESC_F_USED;
1185 }
1186 }
1187 }
1188
1189 if (i < head)
1190 vq->packed.avail_wrap_counter ^= 1;
1191
1192 /* We're using some buffers from the free list. */
1193 vq->vq.num_free -= descs_used;
1194
1195 /* Update free pointer */
1196 vq->packed.next_avail_idx = i;
1197 vq->free_head = curr;
1198
1199 /* Store token. */
1200 vq->packed.desc_state[id].num = descs_used;
1201 vq->packed.desc_state[id].data = data;
1202 vq->packed.desc_state[id].indir_desc = ctx;
1203 vq->packed.desc_state[id].last = prev;
1204
1205 /*
1206 * A driver MUST NOT make the first descriptor in the list
1207 * available before all subsequent descriptors comprising
1208 * the list are made available.
1209 */
1210 virtio_wmb(vq->weak_barriers);
1211 vq->packed.vring.desc[head].flags = head_flags;
1212 vq->num_added += descs_used;
1213
1214 pr_debug("Added buffer head %i to %p\n", head, vq);
1215 END_USE(vq);
1216
1217 return 0;
1218
1219unmap_release:
1220 err_idx = i;
1221 i = head;
1222
1223 vq->packed.avail_used_flags = avail_used_flags;
1224
1225 for (n = 0; n < total_sg; n++) {
1226 if (i == err_idx)
1227 break;
1228 vring_unmap_desc_packed(vq, &desc[i]);
1229 i++;
1230 if (i >= vq->packed.vring.num)
1231 i = 0;
1232 }
1233
1234 END_USE(vq);
1235 return -EIO;
1236}
1237
1238static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
1239{
1240 struct vring_virtqueue *vq = to_vvq(_vq);
f51f9826 1241 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
1ce9e605
TB
1242 bool needs_kick;
1243 union {
1244 struct {
1245 __le16 off_wrap;
1246 __le16 flags;
1247 };
1248 u32 u32;
1249 } snapshot;
1250
1251 START_USE(vq);
1252
1253 /*
1254 * We need to expose the new flags value before checking notification
1255 * suppressions.
1256 */
1257 virtio_mb(vq->weak_barriers);
1258
f51f9826
TB
1259 old = vq->packed.next_avail_idx - vq->num_added;
1260 new = vq->packed.next_avail_idx;
1ce9e605
TB
1261 vq->num_added = 0;
1262
1263 snapshot.u32 = *(u32 *)vq->packed.vring.device;
1264 flags = le16_to_cpu(snapshot.flags);
1265
1266 LAST_ADD_TIME_CHECK(vq);
1267 LAST_ADD_TIME_INVALID(vq);
1268
f51f9826
TB
1269 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1270 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1271 goto out;
1272 }
1273
1274 off_wrap = le16_to_cpu(snapshot.off_wrap);
1275
1276 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1277 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1278 if (wrap_counter != vq->packed.avail_wrap_counter)
1279 event_idx -= vq->packed.vring.num;
1280
1281 needs_kick = vring_need_event(event_idx, new, old);
1282out:
1ce9e605
TB
1283 END_USE(vq);
1284 return needs_kick;
1285}
1286
1287static void detach_buf_packed(struct vring_virtqueue *vq,
1288 unsigned int id, void **ctx)
1289{
1290 struct vring_desc_state_packed *state = NULL;
1291 struct vring_packed_desc *desc;
1292 unsigned int i, curr;
1293
1294 state = &vq->packed.desc_state[id];
1295
1296 /* Clear data ptr. */
1297 state->data = NULL;
1298
1299 vq->packed.desc_state[state->last].next = vq->free_head;
1300 vq->free_head = id;
1301 vq->vq.num_free += state->num;
1302
1303 if (unlikely(vq->use_dma_api)) {
1304 curr = id;
1305 for (i = 0; i < state->num; i++) {
1306 vring_unmap_state_packed(vq,
1307 &vq->packed.desc_extra[curr]);
1308 curr = vq->packed.desc_state[curr].next;
1309 }
1310 }
1311
1312 if (vq->indirect) {
1313 u32 len;
1314
1315 /* Free the indirect table, if any, now that it's unmapped. */
1316 desc = state->indir_desc;
1317 if (!desc)
1318 return;
1319
1320 if (vq->use_dma_api) {
1321 len = vq->packed.desc_extra[id].len;
1322 for (i = 0; i < len / sizeof(struct vring_packed_desc);
1323 i++)
1324 vring_unmap_desc_packed(vq, &desc[i]);
1325 }
1326 kfree(desc);
1327 state->indir_desc = NULL;
1328 } else if (ctx) {
1329 *ctx = state->indir_desc;
1330 }
1331}
1332
1333static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
1334 u16 idx, bool used_wrap_counter)
1335{
1336 bool avail, used;
1337 u16 flags;
1338
1339 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
1340 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
1341 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
1342
1343 return avail == used && used == used_wrap_counter;
1344}
1345
1346static inline bool more_used_packed(const struct vring_virtqueue *vq)
1347{
1348 return is_used_desc_packed(vq, vq->last_used_idx,
1349 vq->packed.used_wrap_counter);
1350}
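/*
 * Note (added for clarity): in the packed ring a descriptor has been used
 * by the device when its AVAIL and USED flag bits are equal to each other
 * and to the wrap counter the driver tracks; the two helpers above
 * implement exactly that check.
 */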
1351
1352static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
1353 unsigned int *len,
1354 void **ctx)
1355{
1356 struct vring_virtqueue *vq = to_vvq(_vq);
1357 u16 last_used, id;
1358 void *ret;
1359
1360 START_USE(vq);
1361
1362 if (unlikely(vq->broken)) {
1363 END_USE(vq);
1364 return NULL;
1365 }
1366
1367 if (!more_used_packed(vq)) {
1368 pr_debug("No more buffers in queue\n");
1369 END_USE(vq);
1370 return NULL;
1371 }
1372
1373 /* Only get used elements after they have been exposed by host. */
1374 virtio_rmb(vq->weak_barriers);
1375
1376 last_used = vq->last_used_idx;
1377 id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
1378 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
1379
1380 if (unlikely(id >= vq->packed.vring.num)) {
1381 BAD_RING(vq, "id %u out of range\n", id);
1382 return NULL;
1383 }
1384 if (unlikely(!vq->packed.desc_state[id].data)) {
1385 BAD_RING(vq, "id %u is not a head!\n", id);
1386 return NULL;
1387 }
1388
1389 /* detach_buf_packed clears data, so grab it now. */
1390 ret = vq->packed.desc_state[id].data;
1391 detach_buf_packed(vq, id, ctx);
1392
1393 vq->last_used_idx += vq->packed.desc_state[id].num;
1394 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
1395 vq->last_used_idx -= vq->packed.vring.num;
1396 vq->packed.used_wrap_counter ^= 1;
1397 }
1398
f51f9826
TB
1399 /*
1400 * If we expect an interrupt for the next entry, tell host
1401 * by writing event index and flush out the write before
1402 * the read in the next get_buf call.
1403 */
1404 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1405 virtio_store_mb(vq->weak_barriers,
1406 &vq->packed.vring.driver->off_wrap,
1407 cpu_to_le16(vq->last_used_idx |
1408 (vq->packed.used_wrap_counter <<
1409 VRING_PACKED_EVENT_F_WRAP_CTR)));
1410
1ce9e605
TB
1411 LAST_ADD_TIME_INVALID(vq);
1412
1413 END_USE(vq);
1414 return ret;
1415}
1416
1417static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
1418{
1419 struct vring_virtqueue *vq = to_vvq(_vq);
1420
1421 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
1422 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1423 vq->packed.vring.driver->flags =
1424 cpu_to_le16(vq->packed.event_flags_shadow);
1425 }
1426}
1427
1428static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
1429{
1430 struct vring_virtqueue *vq = to_vvq(_vq);
1431
1432 START_USE(vq);
1433
1434 /*
1435 * We optimistically turn back on interrupts, then check if there was
1436 * more to do.
1437 */
1438
f51f9826
TB
1439 if (vq->event) {
1440 vq->packed.vring.driver->off_wrap =
1441 cpu_to_le16(vq->last_used_idx |
1442 (vq->packed.used_wrap_counter <<
1443 VRING_PACKED_EVENT_F_WRAP_CTR));
1444 /*
1445 * We need to update event offset and event wrap
1446 * counter first before updating event flags.
1447 */
1448 virtio_wmb(vq->weak_barriers);
1449 }
1450
1ce9e605 1451 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
f51f9826
TB
1452 vq->packed.event_flags_shadow = vq->event ?
1453 VRING_PACKED_EVENT_FLAG_DESC :
1454 VRING_PACKED_EVENT_FLAG_ENABLE;
1ce9e605
TB
1455 vq->packed.vring.driver->flags =
1456 cpu_to_le16(vq->packed.event_flags_shadow);
1457 }
1458
1459 END_USE(vq);
1460 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
1461 VRING_PACKED_EVENT_F_WRAP_CTR);
1462}
1463
1464static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
1465{
1466 struct vring_virtqueue *vq = to_vvq(_vq);
1467 bool wrap_counter;
1468 u16 used_idx;
1469
1470 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1471 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1472
1473 return is_used_desc_packed(vq, used_idx, wrap_counter);
1474}
1475
1476static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
1477{
1478 struct vring_virtqueue *vq = to_vvq(_vq);
1479 u16 used_idx, wrap_counter;
f51f9826 1480 u16 bufs;
1ce9e605
TB
1481
1482 START_USE(vq);
1483
1484 /*
1485 * We optimistically turn back on interrupts, then check if there was
1486 * more to do.
1487 */
1488
f51f9826
TB
1489 if (vq->event) {
1490 /* TODO: tune this threshold */
1491 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1492 wrap_counter = vq->packed.used_wrap_counter;
1493
1494 used_idx = vq->last_used_idx + bufs;
1495 if (used_idx >= vq->packed.vring.num) {
1496 used_idx -= vq->packed.vring.num;
1497 wrap_counter ^= 1;
1498 }
1499
1500 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1501 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1502
1503 /*
1504 * We need to update event offset and event wrap
1505 * counter first before updating event flags.
1506 */
1507 virtio_wmb(vq->weak_barriers);
f51f9826 1508 }
1ce9e605
TB
1509
1510 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
f51f9826
TB
1511 vq->packed.event_flags_shadow = vq->event ?
1512 VRING_PACKED_EVENT_FLAG_DESC :
1513 VRING_PACKED_EVENT_FLAG_ENABLE;
1ce9e605
TB
1514 vq->packed.vring.driver->flags =
1515 cpu_to_le16(vq->packed.event_flags_shadow);
1516 }
1517
1518 /*
1519 * We need to update event suppression structure first
1520 * before re-checking for more used buffers.
1521 */
1522 virtio_mb(vq->weak_barriers);
1523
40ce7919
ML
1524 if (is_used_desc_packed(vq,
1525 vq->last_used_idx,
1526 vq->packed.used_wrap_counter)) {
1ce9e605
TB
1527 END_USE(vq);
1528 return false;
1529 }
1530
1531 END_USE(vq);
1532 return true;
1533}
1534
1535static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
1536{
1537 struct vring_virtqueue *vq = to_vvq(_vq);
1538 unsigned int i;
1539 void *buf;
1540
1541 START_USE(vq);
1542
1543 for (i = 0; i < vq->packed.vring.num; i++) {
1544 if (!vq->packed.desc_state[i].data)
1545 continue;
 1546 /* detach_buf_packed clears data, so grab it now. */
1547 buf = vq->packed.desc_state[i].data;
1548 detach_buf_packed(vq, i, NULL);
1549 END_USE(vq);
1550 return buf;
1551 }
1552 /* That should have freed everything. */
1553 BUG_ON(vq->vq.num_free != vq->packed.vring.num);
1554
1555 END_USE(vq);
1556 return NULL;
1557}
1558
1559static struct virtqueue *vring_create_virtqueue_packed(
1560 unsigned int index,
1561 unsigned int num,
1562 unsigned int vring_align,
1563 struct virtio_device *vdev,
1564 bool weak_barriers,
1565 bool may_reduce_num,
1566 bool context,
1567 bool (*notify)(struct virtqueue *),
1568 void (*callback)(struct virtqueue *),
1569 const char *name)
1570{
1571 struct vring_virtqueue *vq;
1572 struct vring_packed_desc *ring;
1573 struct vring_packed_desc_event *driver, *device;
1574 dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
1575 size_t ring_size_in_bytes, event_size_in_bytes;
1576 unsigned int i;
1577
1578 ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
1579
1580 ring = vring_alloc_queue(vdev, ring_size_in_bytes,
1581 &ring_dma_addr,
1582 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1583 if (!ring)
1584 goto err_ring;
1585
1586 event_size_in_bytes = sizeof(struct vring_packed_desc_event);
1587
1588 driver = vring_alloc_queue(vdev, event_size_in_bytes,
1589 &driver_event_dma_addr,
1590 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1591 if (!driver)
1592 goto err_driver;
1593
1594 device = vring_alloc_queue(vdev, event_size_in_bytes,
1595 &device_event_dma_addr,
1596 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1597 if (!device)
1598 goto err_device;
1599
1600 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
1601 if (!vq)
1602 goto err_vq;
1603
1604 vq->vq.callback = callback;
1605 vq->vq.vdev = vdev;
1606 vq->vq.name = name;
1607 vq->vq.num_free = num;
1608 vq->vq.index = index;
1609 vq->we_own_ring = true;
1610 vq->notify = notify;
1611 vq->weak_barriers = weak_barriers;
1612 vq->broken = false;
1613 vq->last_used_idx = 0;
8d622d21 1614 vq->event_triggered = false;
1ce9e605
TB
1615 vq->num_added = 0;
1616 vq->packed_ring = true;
1617 vq->use_dma_api = vring_use_dma_api(vdev);
1ce9e605
TB
1618#ifdef DEBUG
1619 vq->in_use = false;
1620 vq->last_add_time_valid = false;
1621#endif
1622
1623 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
1624 !context;
1625 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1626
45383fb0
TB
1627 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1628 vq->weak_barriers = false;
1629
1ce9e605
TB
1630 vq->packed.ring_dma_addr = ring_dma_addr;
1631 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1632 vq->packed.device_event_dma_addr = device_event_dma_addr;
1633
1634 vq->packed.ring_size_in_bytes = ring_size_in_bytes;
1635 vq->packed.event_size_in_bytes = event_size_in_bytes;
1636
1637 vq->packed.vring.num = num;
1638 vq->packed.vring.desc = ring;
1639 vq->packed.vring.driver = driver;
1640 vq->packed.vring.device = device;
1641
1642 vq->packed.next_avail_idx = 0;
1643 vq->packed.avail_wrap_counter = 1;
1644 vq->packed.used_wrap_counter = 1;
1645 vq->packed.event_flags_shadow = 0;
1646 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
1647
1648 vq->packed.desc_state = kmalloc_array(num,
1649 sizeof(struct vring_desc_state_packed),
1650 GFP_KERNEL);
1651 if (!vq->packed.desc_state)
1652 goto err_desc_state;
1653
1654 memset(vq->packed.desc_state, 0,
1655 num * sizeof(struct vring_desc_state_packed));
1656
1657 /* Put everything in free lists. */
1658 vq->free_head = 0;
1659 for (i = 0; i < num-1; i++)
1660 vq->packed.desc_state[i].next = i + 1;
1661
1662 vq->packed.desc_extra = kmalloc_array(num,
1663 sizeof(struct vring_desc_extra_packed),
1664 GFP_KERNEL);
1665 if (!vq->packed.desc_extra)
1666 goto err_desc_extra;
1667
1668 memset(vq->packed.desc_extra, 0,
1669 num * sizeof(struct vring_desc_extra_packed));
1670
1671 /* No callback? Tell other side not to bother us. */
1672 if (!callback) {
1673 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
1674 vq->packed.vring.driver->flags =
1675 cpu_to_le16(vq->packed.event_flags_shadow);
1676 }
1677
e152d8af 1678 list_add_tail(&vq->vq.list, &vdev->vqs);
1ce9e605
TB
1679 return &vq->vq;
1680
1681err_desc_extra:
1682 kfree(vq->packed.desc_state);
1683err_desc_state:
1684 kfree(vq);
1685err_vq:
ae93d8ea 1686 vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
1ce9e605 1687err_device:
ae93d8ea 1688 vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
1ce9e605
TB
1689err_driver:
1690 vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
1691err_ring:
1692 return NULL;
1693}
1694
1695
e6f633e5
TB
1696/*
1697 * Generic functions and exported symbols.
1698 */
1699
1700static inline int virtqueue_add(struct virtqueue *_vq,
1701 struct scatterlist *sgs[],
1702 unsigned int total_sg,
1703 unsigned int out_sgs,
1704 unsigned int in_sgs,
1705 void *data,
1706 void *ctx,
1707 gfp_t gfp)
1708{
1ce9e605
TB
1709 struct vring_virtqueue *vq = to_vvq(_vq);
1710
1711 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
1712 out_sgs, in_sgs, data, ctx, gfp) :
1713 virtqueue_add_split(_vq, sgs, total_sg,
1714 out_sgs, in_sgs, data, ctx, gfp);
e6f633e5
TB
1715}
1716
1717/**
1718 * virtqueue_add_sgs - expose buffers to other end
a5581206 1719 * @_vq: the struct virtqueue we're talking about.
e6f633e5 1720 * @sgs: array of terminated scatterlists.
a5581206
JB
1721 * @out_sgs: the number of scatterlists readable by other side
1722 * @in_sgs: the number of scatterlists which are writable (after readable ones)
e6f633e5
TB
1723 * @data: the token identifying the buffer.
1724 * @gfp: how to do memory allocations (if necessary).
1725 *
1726 * Caller must ensure we don't call this with other virtqueue operations
1727 * at the same time (except where noted).
1728 *
1729 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1730 */
1731int virtqueue_add_sgs(struct virtqueue *_vq,
1732 struct scatterlist *sgs[],
1733 unsigned int out_sgs,
1734 unsigned int in_sgs,
1735 void *data,
1736 gfp_t gfp)
1737{
1738 unsigned int i, total_sg = 0;
1739
1740 /* Count them first. */
1741 for (i = 0; i < out_sgs + in_sgs; i++) {
1742 struct scatterlist *sg;
1743
1744 for (sg = sgs[i]; sg; sg = sg_next(sg))
1745 total_sg++;
1746 }
1747 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1748 data, NULL, gfp);
1749}
1750EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
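/*
 * Example (illustrative sketch only): queueing one device-readable and one
 * device-writable buffer, roughly as a request/response style driver would.
 * The names req, resp and their types are hypothetical.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */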
1751
1752/**
1753 * virtqueue_add_outbuf - expose output buffers to other end
1754 * @vq: the struct virtqueue we're talking about.
1755 * @sg: scatterlist (must be well-formed and terminated!)
1756 * @num: the number of entries in @sg readable by other side
1757 * @data: the token identifying the buffer.
1758 * @gfp: how to do memory allocations (if necessary).
1759 *
1760 * Caller must ensure we don't call this with other virtqueue operations
1761 * at the same time (except where noted).
1762 *
1763 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1764 */
1765int virtqueue_add_outbuf(struct virtqueue *vq,
1766 struct scatterlist *sg, unsigned int num,
1767 void *data,
1768 gfp_t gfp)
1769{
1770 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1771}
1772EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1773
1774/**
1775 * virtqueue_add_inbuf - expose input buffers to other end
1776 * @vq: the struct virtqueue we're talking about.
1777 * @sg: scatterlist (must be well-formed and terminated!)
1778 * @num: the number of entries in @sg writable by other side
1779 * @data: the token identifying the buffer.
1780 * @gfp: how to do memory allocations (if necessary).
1781 *
1782 * Caller must ensure we don't call this with other virtqueue operations
1783 * at the same time (except where noted).
1784 *
1785 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1786 */
1787int virtqueue_add_inbuf(struct virtqueue *vq,
1788 struct scatterlist *sg, unsigned int num,
1789 void *data,
1790 gfp_t gfp)
1791{
1792 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1793}
1794EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
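/*
 * Example (illustrative sketch only): posting a receive buffer, in the
 * style of network-like drivers. "buf" and "len" are hypothetical.
 *
 *	struct scatterlist sg;
 *	int err;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */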
1795
1796/**
1797 * virtqueue_add_inbuf_ctx - expose input buffers to other end
1798 * @vq: the struct virtqueue we're talking about.
1799 * @sg: scatterlist (must be well-formed and terminated!)
1800 * @num: the number of entries in @sg writable by other side
1801 * @data: the token identifying the buffer.
1802 * @ctx: extra context for the token
1803 * @gfp: how to do memory allocations (if necessary).
1804 *
1805 * Caller must ensure we don't call this with other virtqueue operations
1806 * at the same time (except where noted).
1807 *
1808 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
1809 */
1810int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1811 struct scatterlist *sg, unsigned int num,
1812 void *data,
1813 void *ctx,
1814 gfp_t gfp)
1815{
1816 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1817}
1818EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1819
1820/**
1821 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
a5581206 1822 * @_vq: the struct virtqueue
e6f633e5
TB
1823 *
1824 * Instead of virtqueue_kick(), you can do:
1825 * if (virtqueue_kick_prepare(vq))
1826 * virtqueue_notify(vq);
1827 *
1828 * This is sometimes useful because the virtqueue_kick_prepare() needs
1829 * to be serialized, but the actual virtqueue_notify() call does not.
1830 */
1831bool virtqueue_kick_prepare(struct virtqueue *_vq)
1832{
1ce9e605
TB
1833 struct vring_virtqueue *vq = to_vvq(_vq);
1834
1835 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
1836 virtqueue_kick_prepare_split(_vq);
e6f633e5
TB
1837}
1838EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
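/*
 * Illustrative usage sketch, not part of this file: the prepare/notify
 * split lets a driver drop its lock before the (possibly expensive)
 * notification to the host.  The lock and function names are hypothetical,
 * and error handling is kept to a minimum.
 */
static void my_submit(struct virtqueue *vq, spinlock_t *lock,
		      struct scatterlist *sg, void *token)
{
	unsigned long flags;
	bool notify = false;

	spin_lock_irqsave(lock, flags);
	if (!virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC))
		notify = virtqueue_kick_prepare(vq);	/* serialized part */
	spin_unlock_irqrestore(lock, flags);

	if (notify)
		virtqueue_notify(vq);	/* may trap to the hypervisor */
}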
1839
1840/**
1841 * virtqueue_notify - second half of split virtqueue_kick call.
a5581206 1842 * @_vq: the struct virtqueue
e6f633e5
TB
1843 *
1844 * This does not need to be serialized.
1845 *
1846 * Returns false if host notify failed or queue is broken, otherwise true.
1847 */
1848bool virtqueue_notify(struct virtqueue *_vq)
1849{
1850 struct vring_virtqueue *vq = to_vvq(_vq);
1851
1852 if (unlikely(vq->broken))
1853 return false;
1854
1855 /* Prod other side to tell it about changes. */
1856 if (!vq->notify(_vq)) {
1857 vq->broken = true;
1858 return false;
1859 }
1860 return true;
1861}
1862EXPORT_SYMBOL_GPL(virtqueue_notify);
1863
1864/**
1865 * virtqueue_kick - update after add_buf
1866 * @vq: the struct virtqueue
1867 *
1868 * After one or more virtqueue_add_* calls, invoke this to kick
1869 * the other side.
1870 *
1871 * Caller must ensure we don't call this with other virtqueue
1872 * operations at the same time (except where noted).
1873 *
1874 * Returns false if kick failed, otherwise true.
1875 */
1876bool virtqueue_kick(struct virtqueue *vq)
1877{
1878 if (virtqueue_kick_prepare(vq))
1879 return virtqueue_notify(vq);
1880 return true;
1881}
1882EXPORT_SYMBOL_GPL(virtqueue_kick);
1883
1884/**
31c11db6 1885 * virtqueue_get_buf_ctx - get the next used buffer
a5581206 1886 * @_vq: the struct virtqueue we're talking about.
e6f633e5 1887 * @len: the length written into the buffer
a5581206 1888 * @ctx: extra context for the token
e6f633e5
TB
1889 *
1890 * If the device wrote data into the buffer, @len will be set to the
1891 * amount written. This means you don't need to clear the buffer
1892 * beforehand to ensure there's no data leakage in the case of short
1893 * writes.
1894 *
1895 * Caller must ensure we don't call this with other virtqueue
1896 * operations at the same time (except where noted).
1897 *
1898 * Returns the "data" token handed to virtqueue_add_*(), or NULL if
1899 * there are no used buffers.
1900 */
1901void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
1902 void **ctx)
1903{
1ce9e605
TB
1904 struct vring_virtqueue *vq = to_vvq(_vq);
1905
1906 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
1907 virtqueue_get_buf_ctx_split(_vq, len, ctx);
e6f633e5
TB
1908}
1909EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
1910
1911void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
1912{
1913 return virtqueue_get_buf_ctx(_vq, len, NULL);
1914}
1915EXPORT_SYMBOL_GPL(virtqueue_get_buf);
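/*
 * Illustrative usage sketch, not part of this file: draining every used
 * buffer, typically from the virtqueue callback.  It assumes the tokens
 * were kmalloc()ed buffers, which is driver-specific.
 */
static void my_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
		/* For inbufs, @len is how many bytes the device wrote. */
		pr_debug("completed buffer %p, %u bytes\n", buf, len);
		kfree(buf);
	}
}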
e6f633e5
TB
1916/**
1917 * virtqueue_disable_cb - disable callbacks
a5581206 1918 * @_vq: the struct virtqueue we're talking about.
e6f633e5
TB
1919 *
1920 * Note that this is not necessarily synchronous: a callback may still run
1921 * after it returns, so it is unreliable and only useful as an optimization.
1922 *
1923 * Unlike other operations, this need not be serialized.
1924 */
1925void virtqueue_disable_cb(struct virtqueue *_vq)
1926{
1ce9e605
TB
1927 struct vring_virtqueue *vq = to_vvq(_vq);
1928
8d622d21
MT
1929 /* If device triggered an event already it won't trigger one again:
1930 * no need to disable.
1931 */
1932 if (vq->event_triggered)
1933 return;
1934
1ce9e605
TB
1935 if (vq->packed_ring)
1936 virtqueue_disable_cb_packed(_vq);
1937 else
1938 virtqueue_disable_cb_split(_vq);
e6f633e5
TB
1939}
1940EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
1941
1942/**
1943 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
a5581206 1944 * @_vq: the struct virtqueue we're talking about.
e6f633e5
TB
1945 *
1946 * This re-enables callbacks; it returns the current queue state
1947 * in an opaque unsigned value. This value should later be passed to
1948 * virtqueue_poll() to detect a possible race between the driver checking for
1949 * more work and enabling callbacks.
1950 *
1951 * Caller must ensure we don't call this with other virtqueue
1952 * operations at the same time (except where noted).
1953 */
1954unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
1955{
1ce9e605
TB
1956 struct vring_virtqueue *vq = to_vvq(_vq);
1957
8d622d21
MT
1958 if (vq->event_triggered)
1959 vq->event_triggered = false;
1960
1ce9e605
TB
1961 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
1962 virtqueue_enable_cb_prepare_split(_vq);
e6f633e5
TB
1963}
1964EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
1965
1966/**
1967 * virtqueue_poll - query pending used buffers
a5581206 1968 * @_vq: the struct virtqueue we're talking about.
e6f633e5
TB
1969 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
1970 *
1971 * Returns "true" if there are pending used buffers in the queue.
1972 *
1973 * This does not need to be serialized.
1974 */
1975bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1976{
1977 struct vring_virtqueue *vq = to_vvq(_vq);
1978
481a0d74
MW
1979 if (unlikely(vq->broken))
1980 return false;
1981
e6f633e5 1982 virtio_mb(vq->weak_barriers);
1ce9e605
TB
1983 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
1984 virtqueue_poll_split(_vq, last_used_idx);
e6f633e5
TB
1985}
1986EXPORT_SYMBOL_GPL(virtqueue_poll);
1987
1988/**
1989 * virtqueue_enable_cb - restart callbacks after disable_cb.
a5581206 1990 * @_vq: the struct virtqueue we're talking about.
e6f633e5
TB
1991 *
1992 * This re-enables callbacks; it returns "false" if there are pending
1993 * buffers in the queue, to detect a possible race between the driver
1994 * checking for more work, and enabling callbacks.
1995 *
1996 * Caller must ensure we don't call this with other virtqueue
1997 * operations at the same time (except where noted).
1998 */
1999bool virtqueue_enable_cb(struct virtqueue *_vq)
2000{
2001 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
2002
2003 return !virtqueue_poll(_vq, last_used_idx);
2004}
2005EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
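/*
 * Illustrative usage sketch, not part of this file: the usual callback
 * pattern disables further callbacks while processing and loops if
 * virtqueue_enable_cb() reports that buffers slipped in meanwhile.
 * my_drain_used() refers to the hypothetical helper sketched above.
 */
static void my_vq_callback(struct virtqueue *vq)
{
	do {
		virtqueue_disable_cb(vq);
		my_drain_used(vq);
	} while (!virtqueue_enable_cb(vq));
}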
2006
2007/**
2008 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
a5581206 2009 * @_vq: the struct virtqueue we're talking about.
e6f633e5
TB
2010 *
2011 * This re-enables callbacks but hints to the other side to delay
2012 * interrupts until most of the available buffers have been processed;
2013 * it returns "false" if there are many pending buffers in the queue,
2014 * to detect a possible race between the driver checking for more work,
2015 * and enabling callbacks.
2016 *
2017 * Caller must ensure we don't call this with other virtqueue
2018 * operations at the same time (except where noted).
2019 */
2020bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2021{
1ce9e605
TB
2022 struct vring_virtqueue *vq = to_vvq(_vq);
2023
8d622d21
MT
2024 if (vq->event_triggered)
2025 vq->event_triggered = false;
2026
1ce9e605
TB
2027 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
2028 virtqueue_enable_cb_delayed_split(_vq);
e6f633e5
TB
2029}
2030EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
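/*
 * Illustrative usage sketch, not part of this file: transmit paths often
 * prefer a delayed interrupt so the device coalesces completions; if
 * buffers were used in the meantime, reclaim them right away instead of
 * waiting.  my_drain_used() is the hypothetical helper sketched earlier.
 */
static void my_tx_cleanup(struct virtqueue *vq)
{
	my_drain_used(vq);

	if (!virtqueue_enable_cb_delayed(vq))
		my_drain_used(vq);	/* raced with new completions */
}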
2031
138fd251
TB
2032/**
2033 * virtqueue_detach_unused_buf - detach first unused buffer
a5581206 2034 * @_vq: the struct virtqueue we're talking about.
138fd251
TB
2035 *
2036 * Returns NULL or the "data" token handed to virtqueue_add_*().
2037 * This is not valid on an active queue; it is useful only for device
2038 * shutdown.
2039 */
2040void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2041{
1ce9e605
TB
2042 struct vring_virtqueue *vq = to_vvq(_vq);
2043
2044 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
2045 virtqueue_detach_unused_buf_split(_vq);
138fd251 2046}
7c5e9ed0 2047EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
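/*
 * Illustrative usage sketch, not part of this file: on device removal,
 * after resetting the device and before deleting the vqs, reclaim buffers
 * that were queued but never used.  Assumes the tokens were kmalloc()ed
 * buffers, which is driver-specific.
 */
static void my_free_unused(struct virtqueue *vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);
}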
c021eac4 2048
138fd251
TB
2049static inline bool more_used(const struct vring_virtqueue *vq)
2050{
1ce9e605 2051 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
138fd251
TB
2052}
2053
0a8a69dd
RR
2054irqreturn_t vring_interrupt(int irq, void *_vq)
2055{
2056 struct vring_virtqueue *vq = to_vvq(_vq);
2057
2058 if (!more_used(vq)) {
2059 pr_debug("virtqueue interrupt with no work for %p\n", vq);
2060 return IRQ_NONE;
2061 }
2062
2063 if (unlikely(vq->broken))
2064 return IRQ_HANDLED;
2065
8d622d21
MT
2066 /* Just a hint for performance: so it's ok that this can be racy! */
2067 if (vq->event)
2068 vq->event_triggered = true;
2069
0a8a69dd 2070 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
18445c4d
RR
2071 if (vq->vq.callback)
2072 vq->vq.callback(&vq->vq);
0a8a69dd
RR
2073
2074 return IRQ_HANDLED;
2075}
c6fd4701 2076EXPORT_SYMBOL_GPL(vring_interrupt);
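/*
 * Illustrative usage sketch, not part of this file: a transport with a
 * dedicated interrupt per virtqueue can hand vring_interrupt() directly to
 * request_irq(), passing the virtqueue as dev_id (virtio-pci does roughly
 * this for per-vq MSI-X vectors).  The irq number and name are hypothetical.
 */
static int my_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	return request_irq(irq, vring_interrupt, 0, "my-virtio-vq", vq);
}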
0a8a69dd 2077
1ce9e605 2078/* Only available for split ring */
2a2d1382
AL
2079struct virtqueue *__vring_new_virtqueue(unsigned int index,
2080 struct vring vring,
2081 struct virtio_device *vdev,
2082 bool weak_barriers,
f94682dd 2083 bool context,
2a2d1382
AL
2084 bool (*notify)(struct virtqueue *),
2085 void (*callback)(struct virtqueue *),
2086 const char *name)
0a8a69dd 2087{
0a8a69dd 2088 unsigned int i;
2a2d1382 2089 struct vring_virtqueue *vq;
0a8a69dd 2090
1ce9e605
TB
2091 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2092 return NULL;
2093
cbeedb72 2094 vq = kmalloc(sizeof(*vq), GFP_KERNEL);
0a8a69dd
RR
2095 if (!vq)
2096 return NULL;
2097
1ce9e605 2098 vq->packed_ring = false;
0a8a69dd
RR
2099 vq->vq.callback = callback;
2100 vq->vq.vdev = vdev;
9499f5e7 2101 vq->vq.name = name;
2a2d1382 2102 vq->vq.num_free = vring.num;
06ca287d 2103 vq->vq.index = index;
2a2d1382 2104 vq->we_own_ring = false;
0a8a69dd 2105 vq->notify = notify;
7b21e34f 2106 vq->weak_barriers = weak_barriers;
0a8a69dd
RR
2107 vq->broken = false;
2108 vq->last_used_idx = 0;
8d622d21 2109 vq->event_triggered = false;
0a8a69dd 2110 vq->num_added = 0;
fb3fba6b 2111 vq->use_dma_api = vring_use_dma_api(vdev);
0a8a69dd
RR
2112#ifdef DEBUG
2113 vq->in_use = false;
e93300b1 2114 vq->last_add_time_valid = false;
0a8a69dd
RR
2115#endif
2116
5a08b04f
MT
2117 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
2118 !context;
a5c262c5 2119 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
9fa29b9d 2120
45383fb0
TB
2121 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2122 vq->weak_barriers = false;
2123
d79dca75
TB
2124 vq->split.queue_dma_addr = 0;
2125 vq->split.queue_size_in_bytes = 0;
2126
e593bf97
TB
2127 vq->split.vring = vring;
2128 vq->split.avail_flags_shadow = 0;
2129 vq->split.avail_idx_shadow = 0;
2130
0a8a69dd 2131 /* No callback? Tell other side not to bother us. */
f277ec42 2132 if (!callback) {
e593bf97 2133 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
0ea1e4a6 2134 if (!vq->event)
e593bf97
TB
2135 vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2136 vq->split.avail_flags_shadow);
f277ec42 2137 }
0a8a69dd 2138
cbeedb72
TB
2139 vq->split.desc_state = kmalloc_array(vring.num,
2140 sizeof(struct vring_desc_state_split), GFP_KERNEL);
2141 if (!vq->split.desc_state) {
2142 kfree(vq);
2143 return NULL;
2144 }
2145
0a8a69dd 2146 /* Put everything in free lists. */
0a8a69dd 2147 vq->free_head = 0;
2a2d1382 2148 for (i = 0; i < vring.num-1; i++)
e593bf97 2149 vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
cbeedb72
TB
2150 memset(vq->split.desc_state, 0, vring.num *
2151 sizeof(struct vring_desc_state_split));
0a8a69dd 2152
e152d8af 2153 list_add_tail(&vq->vq.list, &vdev->vqs);
0a8a69dd
RR
2154 return &vq->vq;
2155}
2a2d1382
AL
2156EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
2157
2a2d1382
AL
2158struct virtqueue *vring_create_virtqueue(
2159 unsigned int index,
2160 unsigned int num,
2161 unsigned int vring_align,
2162 struct virtio_device *vdev,
2163 bool weak_barriers,
2164 bool may_reduce_num,
f94682dd 2165 bool context,
2a2d1382
AL
2166 bool (*notify)(struct virtqueue *),
2167 void (*callback)(struct virtqueue *),
2168 const char *name)
2169{
1ce9e605
TB
2170
2171 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2172 return vring_create_virtqueue_packed(index, num, vring_align,
2173 vdev, weak_barriers, may_reduce_num,
2174 context, notify, callback, name);
2175
d79dca75
TB
2176 return vring_create_virtqueue_split(index, num, vring_align,
2177 vdev, weak_barriers, may_reduce_num,
2178 context, notify, callback, name);
2a2d1382
AL
2179}
2180EXPORT_SYMBOL_GPL(vring_create_virtqueue);
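/*
 * Illustrative usage sketch, not part of this file: a transport's queue
 * setup path allocates the ring here and supplies a notify hook that pokes
 * its doorbell.  my_notify(), my_setup_vq() and the 128-entry size are
 * hypothetical; a real transport also programs the ring addresses into the
 * device afterwards (see the virtqueue_get_*_addr() helpers below).
 */
static bool my_notify(struct virtqueue *vq)
{
	/* Write vq->index to the transport's doorbell register here. */
	return true;
}

static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
				     unsigned int index,
				     void (*callback)(struct virtqueue *),
				     const char *name)
{
	return vring_create_virtqueue(index, 128, PAGE_SIZE, vdev,
				      true,	/* weak_barriers */
				      true,	/* may_reduce_num */
				      false,	/* context */
				      my_notify, callback, name);
}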
2181
1ce9e605 2182/* Only available for split ring */
2a2d1382
AL
2183struct virtqueue *vring_new_virtqueue(unsigned int index,
2184 unsigned int num,
2185 unsigned int vring_align,
2186 struct virtio_device *vdev,
2187 bool weak_barriers,
f94682dd 2188 bool context,
2a2d1382
AL
2189 void *pages,
2190 bool (*notify)(struct virtqueue *vq),
2191 void (*callback)(struct virtqueue *vq),
2192 const char *name)
2193{
2194 struct vring vring;
1ce9e605
TB
2195
2196 if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
2197 return NULL;
2198
2a2d1382 2199 vring_init(&vring, num, pages, vring_align);
f94682dd 2200 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
2a2d1382
AL
2201 notify, callback, name);
2202}
c6fd4701 2203EXPORT_SYMBOL_GPL(vring_new_virtqueue);
0a8a69dd 2204
2a2d1382 2205void vring_del_virtqueue(struct virtqueue *_vq)
0a8a69dd 2206{
2a2d1382
AL
2207 struct vring_virtqueue *vq = to_vvq(_vq);
2208
2209 if (vq->we_own_ring) {
1ce9e605
TB
2210 if (vq->packed_ring) {
2211 vring_free_queue(vq->vq.vdev,
2212 vq->packed.ring_size_in_bytes,
2213 vq->packed.vring.desc,
2214 vq->packed.ring_dma_addr);
2215
2216 vring_free_queue(vq->vq.vdev,
2217 vq->packed.event_size_in_bytes,
2218 vq->packed.vring.driver,
2219 vq->packed.driver_event_dma_addr);
2220
2221 vring_free_queue(vq->vq.vdev,
2222 vq->packed.event_size_in_bytes,
2223 vq->packed.vring.device,
2224 vq->packed.device_event_dma_addr);
2225
2226 kfree(vq->packed.desc_state);
2227 kfree(vq->packed.desc_extra);
2228 } else {
2229 vring_free_queue(vq->vq.vdev,
2230 vq->split.queue_size_in_bytes,
2231 vq->split.vring.desc,
2232 vq->split.queue_dma_addr);
1ce9e605 2233 }
2a2d1382 2234 }
f13f09a1
SA
2235 if (!vq->packed_ring)
2236 kfree(vq->split.desc_state);
2a2d1382
AL
2237 list_del(&_vq->list);
2238 kfree(vq);
0a8a69dd 2239}
c6fd4701 2240EXPORT_SYMBOL_GPL(vring_del_virtqueue);
0a8a69dd 2241
e34f8725
RR
2242/* Manipulates transport-specific feature bits. */
2243void vring_transport_features(struct virtio_device *vdev)
2244{
2245 unsigned int i;
2246
2247 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2248 switch (i) {
9fa29b9d
MM
2249 case VIRTIO_RING_F_INDIRECT_DESC:
2250 break;
a5c262c5
MT
2251 case VIRTIO_RING_F_EVENT_IDX:
2252 break;
747ae34a
MT
2253 case VIRTIO_F_VERSION_1:
2254 break;
321bd212 2255 case VIRTIO_F_ACCESS_PLATFORM:
1a937693 2256 break;
f959a128
TB
2257 case VIRTIO_F_RING_PACKED:
2258 break;
45383fb0
TB
2259 case VIRTIO_F_ORDER_PLATFORM:
2260 break;
e34f8725
RR
2261 default:
2262 /* We don't understand this bit. */
e16e12be 2263 __virtio_clear_bit(vdev, i);
e34f8725
RR
2264 }
2265 }
2266}
2267EXPORT_SYMBOL_GPL(vring_transport_features);
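/*
 * Illustrative usage sketch, not part of this file: transports call this
 * from their finalize_features hook so that only ring feature bits they
 * understand survive negotiation (virtio-mmio does essentially this).
 * The function name is hypothetical.
 */
static int my_finalize_features(struct virtio_device *vdev)
{
	/* Clear transport feature bits this code does not know about. */
	vring_transport_features(vdev);

	/* The transport then writes vdev->features to the device. */
	return 0;
}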
2268
5dfc1762
RR
2269/**
2270 * virtqueue_get_vring_size - return the size of the virtqueue's vring
a5581206 2271 * @_vq: the struct virtqueue containing the vring of interest.
5dfc1762
RR
2272 *
2273 * Returns the size of the vring. This is mainly used for boasting to
2274 * userspace. Unlike other operations, this need not be serialized.
2275 */
8f9f4668
RJ
2276unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
2277{
2278
2279 struct vring_virtqueue *vq = to_vvq(_vq);
2280
1ce9e605 2281 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
8f9f4668
RJ
2282}
2283EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
2284
b3b32c94
HG
2285bool virtqueue_is_broken(struct virtqueue *_vq)
2286{
2287 struct vring_virtqueue *vq = to_vvq(_vq);
2288
2289 return vq->broken;
2290}
2291EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2292
e2dcdfe9
RR
2293/*
2294 * This should prevent the device from being used, allowing drivers to
2295 * recover. You may need to grab appropriate locks to flush.
2296 */
2297void virtio_break_device(struct virtio_device *dev)
2298{
2299 struct virtqueue *_vq;
2300
2301 list_for_each_entry(_vq, &dev->vqs, list) {
2302 struct vring_virtqueue *vq = to_vvq(_vq);
2303 vq->broken = true;
2304 }
2305}
2306EXPORT_SYMBOL_GPL(virtio_break_device);
2307
2a2d1382 2308dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
89062652
CH
2309{
2310 struct vring_virtqueue *vq = to_vvq(_vq);
2311
2a2d1382
AL
2312 BUG_ON(!vq->we_own_ring);
2313
1ce9e605
TB
2314 if (vq->packed_ring)
2315 return vq->packed.ring_dma_addr;
2316
d79dca75 2317 return vq->split.queue_dma_addr;
89062652 2318}
2a2d1382 2319EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
89062652 2320
2a2d1382 2321dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
89062652
CH
2322{
2323 struct vring_virtqueue *vq = to_vvq(_vq);
2324
2a2d1382
AL
2325 BUG_ON(!vq->we_own_ring);
2326
1ce9e605
TB
2327 if (vq->packed_ring)
2328 return vq->packed.driver_event_dma_addr;
2329
d79dca75 2330 return vq->split.queue_dma_addr +
e593bf97 2331 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
2a2d1382
AL
2332}
2333EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
2334
2335dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
2336{
2337 struct vring_virtqueue *vq = to_vvq(_vq);
2338
2339 BUG_ON(!vq->we_own_ring);
2340
1ce9e605
TB
2341 if (vq->packed_ring)
2342 return vq->packed.device_event_dma_addr;
2343
d79dca75 2344 return vq->split.queue_dma_addr +
e593bf97 2345 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
2a2d1382
AL
2346}
2347EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
2348
1ce9e605 2349/* Only available for split ring */
2a2d1382
AL
2350const struct vring *virtqueue_get_vring(struct virtqueue *vq)
2351{
e593bf97 2352 return &to_vvq(vq)->split.vring;
89062652 2353}
2a2d1382 2354EXPORT_SYMBOL_GPL(virtqueue_get_vring);
89062652 2355
c6fd4701 2356MODULE_LICENSE("GPL");