virtio_ring: allocate desc state for split ring separately
drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	struct {
		/* Actual memory layout for this queue */
		struct vring vring;

		/* Last written value to avail->flags */
		u16 avail_flags_shadow;

		/* Last written value to avail->idx in guest byte order */
		u16 avail_idx_shadow;

		/* Per-descriptor state. */
		struct vring_desc_state_split *desc_state;
	} split;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
13816c76 386 for (; n < (out_sgs + in_sgs); n++) {
eeebf9b1 387 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
780bc790
AL
388 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
389 if (vring_mapping_error(vq, addr))
390 goto unmap_release;
391
00e6f3d9 392 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
780bc790 393 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
00e6f3d9 394 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
13816c76 395 prev = i;
00e6f3d9 396 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
13816c76 397 }
0a8a69dd
RR
398 }
399 /* Last one doesn't continue. */
00e6f3d9 400 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
0a8a69dd 401
780bc790
AL
402 if (indirect) {
403 /* Now that the indirect table is filled in, map it. */
404 dma_addr_t addr = vring_map_single(
405 vq, desc, total_sg * sizeof(struct vring_desc),
406 DMA_TO_DEVICE);
407 if (vring_mapping_error(vq, addr))
408 goto unmap_release;
409
e593bf97
TB
410 vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
411 VRING_DESC_F_INDIRECT);
412 vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
413 addr);
780bc790 414
e593bf97
TB
415 vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
416 total_sg * sizeof(struct vring_desc));
780bc790
AL
417 }
418
419 /* We're using some buffers from the free list. */
420 vq->vq.num_free -= descs_used;
421
0a8a69dd 422 /* Update free pointer */
b25bd251 423 if (indirect)
e593bf97
TB
424 vq->free_head = virtio16_to_cpu(_vq->vdev,
425 vq->split.vring.desc[head].next);
b25bd251
RR
426 else
427 vq->free_head = i;
0a8a69dd 428
780bc790 429 /* Store token and indirect buffer state. */
cbeedb72 430 vq->split.desc_state[head].data = data;
780bc790 431 if (indirect)
cbeedb72 432 vq->split.desc_state[head].indir_desc = desc;
87646a34 433 else
cbeedb72 434 vq->split.desc_state[head].indir_desc = ctx;
0a8a69dd
RR
435
436 /* Put entry in available array (but don't update avail->idx until they
3b720b8c 437 * do sync). */
e593bf97
TB
438 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
439 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
0a8a69dd 440
ee7cd898
RR
441 /* Descriptors and available array need to be set before we expose the
442 * new available array entries. */
a9a0fef7 443 virtio_wmb(vq->weak_barriers);
e593bf97
TB
444 vq->split.avail_idx_shadow++;
445 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
446 vq->split.avail_idx_shadow);
ee7cd898
RR
447 vq->num_added++;
448
5e05bf58
TH
449 pr_debug("Added buffer head %i to %p\n", head, vq);
450 END_USE(vq);
451
ee7cd898
RR
452 /* This is very unlikely, but theoretically possible. Kick
453 * just in case. */
454 if (unlikely(vq->num_added == (1 << 16) - 1))
455 virtqueue_kick(_vq);
456
98e8c6bc 457 return 0;
780bc790
AL
458
459unmap_release:
460 err_idx = i;
461 i = head;
462
463 for (n = 0; n < total_sg; n++) {
464 if (i == err_idx)
465 break;
138fd251 466 vring_unmap_one_split(vq, &desc[i]);
e593bf97 467 i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
780bc790
AL
468 }
469
780bc790
AL
470 if (indirect)
471 kfree(desc);
472
3cc36f6e 473 END_USE(vq);
780bc790 474 return -EIO;
0a8a69dd 475}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				      vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	return virtqueue_add_split(_vq, sgs, total_sg,
				   out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
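
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * might queue a request that the device reads a header from and then
 * writes a status byte back into.  "my_vq", "req" and the req fields
 * below are hypothetical names used only for illustration, and error
 * handling is trimmed for brevity.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2] = { &hdr, &status };
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *
 *	err = virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(my_vq);
 *
 * sgs[0] is readable by the device (out_sgs = 1), sgs[1] is writable by
 * it (in_sgs = 1), and "req" is the token later returned by
 * virtqueue_get_buf().
 */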

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	return virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
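
/*
 * Illustrative sketch (not part of the original file): the split is handy
 * when buffers are added under a driver lock but the possibly slow host
 * notification should happen outside the critical section.  "my_lock",
 * "my_vq", "sg" and "token" are hypothetical driver-side names, and error
 * handling is omitted.
 *
 *	bool kick;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	virtqueue_add_outbuf(my_vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(my_vq);
 */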

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	return virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
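
/*
 * Illustrative sketch (not part of the original file): a typical used-buffer
 * handler drains the queue with callbacks disabled and relies on the return
 * value of virtqueue_enable_cb() to close the race with buffers that were
 * used while callbacks were being re-enabled.  "my_vq", "buf", "len" and
 * process_buf() are hypothetical names.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	do {
 *		virtqueue_disable_cb(my_vq);
 *		while ((buf = virtqueue_get_buf(my_vq, &len)) != NULL)
 *			process_buf(buf, len);
 *	} while (!virtqueue_enable_cb(my_vq));
 */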

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	return virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	return virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
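
/*
 * Illustrative sketch (not part of the original file): during device
 * removal, once the device has been reset so the queue is no longer
 * active, a driver can reclaim the tokens it had queued.  "my_vq" and
 * free_token() are hypothetical names.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
 *		free_token(buf);
 */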

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->split.vring.desc, vq->queue_dma_addr);
		kfree(vq->split.desc_state);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{

	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");