switch l2cap ->memcpy_fromiovec() to msghdr
[linux-block.git] / drivers/misc/vmw_vmci/vmci_queue_pair.c
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and supporting VM to VM communication and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *      --------------   NEW   -------------
 *      |                                  |
 *     \_/                                \_/
 *      CREATED_NO_MEM <-----------------> CREATED_MEM
 *      |      |                           |
 *      |      o--------------------o      |
 *      |                           |      |
 *     \_/                         \_/    \_/
 *      ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *      |                           |      |
 *      |      o--------------------o      |
 *      |      |                           |
 *     \_/    \_/                         \_/
 *      SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *      |                                  |
 *      |                                  |
 *       -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
 *     bring the queue pair into this state. Once vmci_qp_broker_set_page_store
 *     is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
 *     will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa.
 */

/*
 * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
 * types are passed around to enqueue and dequeue routines.  Note that
 * often the functions passed are simply wrappers around memcpy
 * itself.
 *
 * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
 * there's an unused last parameter for the hosted side.  In
 * ESX, that parameter holds a buffer type.
 */
typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
                                      u64 queue_offset, const void *src,
                                      size_t src_offset, size_t size);
typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
                                        const struct vmci_queue *queue,
                                        u64 queue_offset, size_t size);

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
        struct mutex __mutex;   /* Protects the queue. */
        struct mutex *mutex;    /* Shared by producer and consumer queues. */
        size_t num_pages;       /* Number of pages incl. header. */
        bool host;              /* Host or guest? */
        union {
                struct {
                        dma_addr_t *pas;
                        void **vas;
                } g;            /* Used by the guest. */
                struct {
                        struct page **page;
                        struct page **header_page;
                } h;            /* Used by the host. */
        } u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
        struct vmci_handle handle;
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        u64 produce_q_size;
        u64 consume_q_size;
        u32 peer;
        u32 flags;
        u32 priv_flags;
        bool guest_endpoint;
        unsigned int blocked;
        unsigned int generation;
        wait_queue_head_t event;
};

enum qp_broker_state {
        VMCIQPB_NEW,
        VMCIQPB_CREATED_NO_MEM,
        VMCIQPB_CREATED_MEM,
        VMCIQPB_ATTACHED_NO_MEM,
        VMCIQPB_ATTACHED_MEM,
        VMCIQPB_SHUTDOWN_NO_MEM,
        VMCIQPB_SHUTDOWN_MEM,
        VMCIQPB_GONE
};
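
/*
 * As noted in the comment at the top of the file, the enumerators are
 * laid out so that each *_NO_MEM state is immediately followed by its
 * *_MEM counterpart: incrementing a *_NO_MEM state value yields the
 * corresponding *_MEM state, and vice versa.
 */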

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
                                     _qpb->state == VMCIQPB_ATTACHED_MEM || \
                                     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
        struct list_head list_item;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u32 ref_count;
};

struct qp_broker_entry {
        struct vmci_resource resource;
        struct qp_entry qp;
        u32 create_id;
        u32 attach_id;
        enum qp_broker_state state;
        bool require_trusted_attach;
        bool created_by_trusted;
        bool vmci_page_files;   /* Created by VMX using VMCI page files */
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        struct vmci_queue_header saved_produce_q;
        struct vmci_queue_header saved_consume_q;
        vmci_event_release_cb wakeup_cb;
        void *client_data;
        void *local_mem;        /* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
        struct vmci_resource resource;
        struct qp_entry qp;
        u64 num_ppns;
        void *produce_q;
        void *consume_q;
        struct ppn_set ppn_set;
};

struct qp_list {
        struct list_head head;
        struct mutex mutex;     /* Protect queue list. */
};

static struct qp_list qp_broker_list = {
        .head = LIST_HEAD_INIT(qp_broker_list.head),
        .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
        .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
        .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID 0
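
/*
 * Number of pages needed to back a queue pair entry: one page for each
 * data page of the two queues, plus one header page per queue (the
 * "+ 2" below).
 */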
#define QPE_NUM_PAGES(_QPE) ((u32) \
                             (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
                              DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
        struct vmci_queue *queue = q;

        if (queue) {
                u64 i;

                /* Given size does not include header, so add in a page here. */
                for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
                        dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                          queue->kernel_if->u.g.vas[i],
                                          queue->kernel_if->u.g.pas[i]);
                }

                vfree(queue);
        }
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
        u64 i;
        struct vmci_queue *queue;
        const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
        const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
        const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
        const size_t queue_size =
            sizeof(*queue) + sizeof(*queue->kernel_if) +
            pas_size + vas_size;

        queue = vmalloc(queue_size);
        if (!queue)
                return NULL;

        queue->q_header = NULL;
        queue->saved_header = NULL;
        queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
        queue->kernel_if->mutex = NULL;
        queue->kernel_if->num_pages = num_pages;
        queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
        queue->kernel_if->u.g.vas =
            (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
        queue->kernel_if->host = false;

        for (i = 0; i < num_pages; i++) {
                queue->kernel_if->u.g.vas[i] =
                    dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                       &queue->kernel_if->u.g.pas[i],
                                       GFP_KERNEL);
                if (!queue->kernel_if->u.g.vas[i]) {
                        /* Size excl. the header. */
                        qp_free_queue(queue, i * PAGE_SIZE);
                        return NULL;
                }
        }

        /* Queue header is the first page. */
        queue->q_header = queue->kernel_if->u.g.vas[0];

        return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                                u64 queue_offset,
                                const void *src,
                                size_t size,
                                bool is_iovec)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                    (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1]; /* Skip header. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Enough payload to fill up from this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)src;
                        int err;

                        /* The iovec will track bytes_copied internally. */
                        err = memcpy_fromiovec((u8 *)va + page_offset,
                                               iov, to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
                                return VMCI_ERROR_INVALID_ARGS;
                        }
                } else {
                        memcpy((u8 *)va + page_offset,
                               (u8 *)src + bytes_copied, to_copy);
                }

                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue.  Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int __qp_memcpy_from_queue(void *dest,
                                  const struct vmci_queue *queue,
                                  u64 queue_offset,
                                  size_t size,
                                  bool is_iovec)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                    (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1]; /* Skip header. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Enough payload to fill up this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)dest;
                        int err;

                        /* The iovec will track bytes_copied internally. */
                        err = memcpy_toiovec(iov, (u8 *)va + page_offset,
                                             to_copy);
                        if (err != 0) {
                                if (kernel_if->host)
                                        kunmap(kernel_if->u.h.page[page_index]);
                                return VMCI_ERROR_INVALID_ARGS;
                        }
                } else {
                        memcpy((u8 *)dest + bytes_copied,
                               (u8 *)va + page_offset, to_copy);
                }

                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs - one for the pages in the produce queue,
 * and the other for the pages in the consume queue.  Initializes the list
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
                            u64 num_produce_pages,
                            void *cons_q,
                            u64 num_consume_pages, struct ppn_set *ppn_set)
{
        u32 *produce_ppns;
        u32 *consume_ppns;
        struct vmci_queue *produce_q = prod_q;
        struct vmci_queue *consume_q = cons_q;
        u64 i;

        if (!produce_q || !num_produce_pages || !consume_q ||
            !num_consume_pages || !ppn_set)
                return VMCI_ERROR_INVALID_ARGS;

        if (ppn_set->initialized)
                return VMCI_ERROR_ALREADY_EXISTS;

        produce_ppns =
            kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
        if (!produce_ppns)
                return VMCI_ERROR_NO_MEM;

        consume_ppns =
            kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
        if (!consume_ppns) {
                kfree(produce_ppns);
                return VMCI_ERROR_NO_MEM;
        }

        for (i = 0; i < num_produce_pages; i++) {
                unsigned long pfn;

                produce_ppns[i] =
                    produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
                pfn = produce_ppns[i];

                /* Fail allocation if PFN isn't supported by hypervisor. */
                if (sizeof(pfn) > sizeof(*produce_ppns)
                    && pfn != produce_ppns[i])
                        goto ppn_error;
        }

        for (i = 0; i < num_consume_pages; i++) {
                unsigned long pfn;

                consume_ppns[i] =
                    consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
                pfn = consume_ppns[i];

                /* Fail allocation if PFN isn't supported by hypervisor. */
                if (sizeof(pfn) > sizeof(*consume_ppns)
                    && pfn != consume_ppns[i])
                        goto ppn_error;
        }

        ppn_set->num_produce_pages = num_produce_pages;
        ppn_set->num_consume_pages = num_consume_pages;
        ppn_set->produce_ppns = produce_ppns;
        ppn_set->consume_ppns = consume_ppns;
        ppn_set->initialized = true;
        return VMCI_SUCCESS;

 ppn_error:
        kfree(produce_ppns);
        kfree(consume_ppns);
        return VMCI_ERROR_INVALID_ARGS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
        if (ppn_set->initialized) {
                /* Do not call these functions on NULL inputs. */
                kfree(ppn_set->produce_ppns);
                kfree(ppn_set->consume_ppns);
        }
        memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
        memcpy(call_buf, ppn_set->produce_ppns,
               ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
        memcpy(call_buf +
               ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
               ppn_set->consume_ppns,
               ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

        return VMCI_SUCCESS;
}

static int qp_memcpy_to_queue(struct vmci_queue *queue,
                              u64 queue_offset,
                              const void *src, size_t src_offset, size_t size)
{
        return __qp_memcpy_to_queue(queue, queue_offset,
                                    (u8 *)src + src_offset, size, false);
}

static int qp_memcpy_from_queue(void *dest,
                                size_t dest_offset,
                                const struct vmci_queue *queue,
                                u64 queue_offset, size_t size)
{
        return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
                                      queue, queue_offset, size, false);
}

/*
 * Copies from a given iovec to a VMCI Queue.
 */
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
                                  u64 queue_offset,
                                  const void *src,
                                  size_t src_offset, size_t size)
{

        /*
         * We ignore src_offset because src is really a struct iovec * and will
         * maintain offset internally.
         */
        return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
}

/*
 * Copies to a given iovec from a VMCI Queue.
 */
static int qp_memcpy_from_queue_iov(void *dest,
                                    size_t dest_offset,
                                    const struct vmci_queue *queue,
                                    u64 queue_offset, size_t size)
{
        /*
         * We ignore dest_offset because dest is really a struct iovec * and
         * will maintain offset internally.
         */
        return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
}

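/*
 * The four wrappers above match the vmci_memcpy_{to,from}_queue_func
 * typedefs declared earlier: the plain variants apply the caller's
 * buffer offset and pass is_iovec = false, while the _iov variants
 * ignore the offset argument (the iovec tracks its position internally)
 * and pass is_iovec = true.
 */
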
/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface.  This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
        struct vmci_queue *queue;
        const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
        const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
        const size_t queue_page_size =
            num_pages * sizeof(*queue->kernel_if->u.h.page);

        queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
        if (queue) {
                queue->q_header = NULL;
                queue->saved_header = NULL;
                queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
                queue->kernel_if->host = true;
                queue->kernel_if->mutex = NULL;
                queue->kernel_if->num_pages = num_pages;
                queue->kernel_if->u.h.header_page =
                    (struct page **)((u8 *)queue + queue_size);
                queue->kernel_if->u.h.page =
                    &queue->kernel_if->u.h.header_page[1];
        }

        return queue;
}

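/*
 * Note on qp_host_alloc_queue() above: u.h.header_page points at the
 * start of the page array that follows the queue structure, and u.h.page
 * points at its second element. Once the user pages are pinned,
 * header_page[0] holds the queue header page and page[0..] hold the data
 * pages.
 */
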
/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
        kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        /*
         * Only the host queue has shared state - the guest queues do not
         * need to synchronize access using a queue mutex.
         */
        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                mutex_init(produce_q->kernel_if->mutex);
        }
}

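/*
 * Note on qp_init_queue_mutex() above: both kernel_if->mutex pointers
 * are aimed at the produce queue's __mutex, so the producer and consumer
 * queues share a single lock. This is why either queue may be passed to
 * the acquire/release helpers below.
 */
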
/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = NULL;
                consume_q->kernel_if->mutex = NULL;
        }
}

/*
 * Acquire the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_unlock(queue->kernel_if->mutex);
}

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
                             u64 num_pages, bool dirty)
{
        int i;

        for (i = 0; i < num_pages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                page_cache_release(pages[i]);
                pages[i] = NULL;
        }
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
                                   u64 consume_uva,
                                   struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        int retval;
        int err = VMCI_SUCCESS;

        retval = get_user_pages_fast((uintptr_t) produce_uva,
                                     produce_q->kernel_if->num_pages, 1,
                                     produce_q->kernel_if->u.h.header_page);
        if (retval < produce_q->kernel_if->num_pages) {
                pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 retval, false);
                err = VMCI_ERROR_NO_MEM;
                goto out;
        }

        retval = get_user_pages_fast((uintptr_t) consume_uva,
                                     consume_q->kernel_if->num_pages, 1,
                                     consume_q->kernel_if->u.h.header_page);
        if (retval < consume_q->kernel_if->num_pages) {
                pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
                qp_release_pages(consume_q->kernel_if->u.h.header_page,
                                 retval, false);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 produce_q->kernel_if->num_pages, false);
                err = VMCI_ERROR_NO_MEM;
        }

 out:
        return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
                                        struct vmci_queue *produce_q,
                                        struct vmci_queue *consume_q)
{
        u64 produce_uva;
        u64 consume_uva;

        /*
         * The new style and the old style mapping differ only in
         * that we either get a single or two UVAs, so we split the
         * single UVA range at the appropriate spot.
         */
        produce_uva = page_store->pages;
        consume_uva = page_store->pages +
            produce_q->kernel_if->num_pages * PAGE_SIZE;
        return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
                                       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct.  Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
                                           struct vmci_queue *consume_q)
{
        qp_release_pages(produce_q->kernel_if->u.h.header_page,
                         produce_q->kernel_if->num_pages, true);
        memset(produce_q->kernel_if->u.h.header_page, 0,
               sizeof(*produce_q->kernel_if->u.h.header_page) *
               produce_q->kernel_if->num_pages);
        qp_release_pages(consume_q->kernel_if->u.h.header_page,
                         consume_q->kernel_if->num_pages, true);
        memset(consume_q->kernel_if->u.h.header_page, 0,
               sizeof(*consume_q->kernel_if->u.h.header_page) *
               consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel.  Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
                              struct vmci_queue *consume_q)
{
        int result;

        if (!produce_q->q_header || !consume_q->q_header) {
                struct page *headers[2];

                if (produce_q->q_header != consume_q->q_header)
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;

                if (produce_q->kernel_if->u.h.header_page == NULL ||
                    *produce_q->kernel_if->u.h.header_page == NULL)
                        return VMCI_ERROR_UNAVAILABLE;

                headers[0] = *produce_q->kernel_if->u.h.header_page;
                headers[1] = *consume_q->kernel_if->u.h.header_page;

                produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
                if (produce_q->q_header != NULL) {
                        consume_q->q_header =
                            (struct vmci_queue_header *)((u8 *)
                                                         produce_q->q_header +
                                                         PAGE_SIZE);
                        result = VMCI_SUCCESS;
                } else {
                        pr_warn("vmap failed\n");
                        result = VMCI_ERROR_NO_MEM;
                }
        } else {
                result = VMCI_SUCCESS;
        }

        return result;
}

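/*
 * Note on qp_host_map_queues() above: vmap() maps the produce and
 * consume queue header pages into one virtually contiguous range, which
 * is why the consume queue header can be addressed as exactly one page
 * beyond the produce queue header.
 */
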
/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
                                struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        if (produce_q->q_header) {
                if (produce_q->q_header < consume_q->q_header)
                        vunmap(produce_q->q_header);
                else
                        vunmap(consume_q->q_header);

                produce_q->q_header = NULL;
                consume_q->q_header = NULL;
        }

        return VMCI_SUCCESS;
}

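/*
 * Note on qp_host_unmap_queues() above: the two headers were mapped by a
 * single two-page vmap(), so whichever q_header address is lower is the
 * base of that mapping and is the address that must be passed to
 * vunmap().
 */
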
/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
                                     struct vmci_handle handle)
{
        struct qp_entry *entry;

        if (vmci_handle_is_invalid(handle))
                return NULL;

        list_for_each_entry(entry, &qp_list->head, list_item) {
                if (vmci_handle_is_equal(entry->handle, handle))
                        return entry;
        }

        return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
        struct qp_guest_endpoint *entry;
        struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

        entry = qp ? container_of(qp, struct qp_guest_endpoint, qp) : NULL;
        return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
        struct qp_broker_entry *entry;
        struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

        entry = qp ? container_of(qp, struct qp_broker_entry, qp) : NULL;
        return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
        u32 context_id = vmci_get_context_id();
        struct vmci_event_qp ev;

        ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event =
            attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.peer_id = context_id;
        ev.payload.handle = handle;

        return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles.  Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
                         u32 peer,
                         u32 flags,
                         u64 produce_size,
                         u64 consume_size,
                         void *produce_q,
                         void *consume_q)
{
        int result;
        struct qp_guest_endpoint *entry;
        /* One page each for the queue headers. */
        const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
            DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

        if (vmci_handle_is_invalid(handle)) {
                u32 context_id = vmci_get_context_id();

                handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (entry) {
                entry->qp.peer = peer;
                entry->qp.flags = flags;
                entry->qp.produce_size = produce_size;
                entry->qp.consume_size = consume_size;
                entry->qp.ref_count = 0;
                entry->num_ppns = num_ppns;
                entry->produce_q = produce_q;
                entry->consume_q = consume_q;
                INIT_LIST_HEAD(&entry->qp.list_item);

                /* Add resource obj */
                result = vmci_resource_add(&entry->resource,
                                           VMCI_RESOURCE_TYPE_QPAIR_GUEST,
                                           handle);
                entry->qp.handle = vmci_resource_handle(&entry->resource);
                if ((result != VMCI_SUCCESS) ||
                    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
                        pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                                handle.context, handle.resource, result);
                        kfree(entry);
                        entry = NULL;
                }
        }
        return entry;
}

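/*
 * Note on qp_guest_endpoint_create() above: when the caller passes an
 * invalid handle, the handle is rebuilt with VMCI_INVALID_ID as the
 * resource id, letting vmci_resource_add() assign a free resource id
 * (outside the reserved 0..VMCI_RESERVED_RESOURCE_ID_MAX range) to the
 * new endpoint.
 */
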
/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
        qp_free_ppn_set(&entry->ppn_set);
        qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
        qp_free_queue(entry->produce_q, entry->qp.produce_size);
        qp_free_queue(entry->consume_q, entry->qp.consume_size);
        /* Unlink from resource hash table and free callback */
        vmci_resource_remove(&entry->resource);

        kfree(entry);
}

/*
 * Helper to make a queue pair alloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
        struct vmci_qp_alloc_msg *alloc_msg;
        size_t msg_size;
        int result;

        if (!entry || entry->num_ppns <= 2)
                return VMCI_ERROR_INVALID_ARGS;

        msg_size = sizeof(*alloc_msg) +
            (size_t) entry->num_ppns * sizeof(u32);
        alloc_msg = kmalloc(msg_size, GFP_KERNEL);
        if (!alloc_msg)
                return VMCI_ERROR_NO_MEM;

        alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_ALLOC);
        alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
        alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
        alloc_msg->handle = entry->qp.handle;
        alloc_msg->peer = entry->qp.peer;
        alloc_msg->flags = entry->qp.flags;
        alloc_msg->produce_size = entry->qp.produce_size;
        alloc_msg->consume_size = entry->qp.consume_size;
        alloc_msg->num_ppns = entry->num_ppns;

        result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
                                     &entry->ppn_set);
        if (result == VMCI_SUCCESS)
                result = vmci_send_datagram(&alloc_msg->hdr);

        kfree(alloc_msg);

        return result;
}

/*
 * Helper to make a queue pair detach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
        struct vmci_qp_detach_msg detach_msg;

        detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_DETACH);
        detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
        detach_msg.hdr.payload_size = sizeof(handle);
        detach_msg.handle = handle;

        return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
        if (entry)
                list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
                                 struct qp_entry *entry)
{
        if (entry)
                list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
        int result;
        struct qp_guest_endpoint *entry;
        u32 ref_count = ~0;     /* To avoid compiler warning below */

        mutex_lock(&qp_guest_endpoints.mutex);

        entry = qp_guest_handle_to_entry(handle);
        if (!entry) {
                mutex_unlock(&qp_guest_endpoints.mutex);
                return VMCI_ERROR_NOT_FOUND;
        }

        if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                result = VMCI_SUCCESS;

                if (entry->qp.ref_count > 1) {
                        result = qp_notify_peer_local(false, handle);
                        /*
                         * We can fail to notify a local queuepair
                         * because we can't allocate.  We still want
                         * to release the entry if that happens, so
                         * don't bail out yet.
                         */
                }
        } else {
                result = qp_detatch_hypercall(handle);
                if (result < VMCI_SUCCESS) {
                        /*
                         * We failed to notify a non-local queuepair.
                         * That other queuepair might still be
                         * accessing the shared memory, so don't
                         * release the entry yet.  It will get cleaned
                         * up by VMCIqueue_pair_Exit() if necessary
                         * (assuming we are going away, otherwise why
                         * did this fail?).
                         */

                        mutex_unlock(&qp_guest_endpoints.mutex);
                        return result;
                }
        }

        /*
         * If we get here then we either failed to notify a local queuepair, or
         * we succeeded in all cases.  Release the entry if required.
         */

        entry->qp.ref_count--;
        if (entry->qp.ref_count == 0)
                qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

        /* If we didn't remove the entry, this could change once we unlock. */
        if (entry)
                ref_count = entry->qp.ref_count;

        mutex_unlock(&qp_guest_endpoints.mutex);

        if (ref_count == 0)
                qp_guest_endpoint_destroy(entry);

        return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
                               struct vmci_queue **produce_q,
                               u64 produce_size,
                               struct vmci_queue **consume_q,
                               u64 consume_size,
                               u32 peer,
                               u32 flags,
                               u32 priv_flags)
{
        const u64 num_produce_pages =
            DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
        const u64 num_consume_pages =
            DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
        void *my_produce_q = NULL;
        void *my_consume_q = NULL;
        int result;
        struct qp_guest_endpoint *queue_pair_entry = NULL;

        if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
                return VMCI_ERROR_NO_ACCESS;

        mutex_lock(&qp_guest_endpoints.mutex);

        queue_pair_entry = qp_guest_handle_to_entry(*handle);
        if (queue_pair_entry) {
                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                        /* Local attach case. */
                        if (queue_pair_entry->qp.ref_count > 1) {
                                pr_devel("Error attempting to attach more than once\n");
                                result = VMCI_ERROR_UNAVAILABLE;
                                goto error_keep_entry;
                        }

                        if (queue_pair_entry->qp.produce_size != consume_size ||
                            queue_pair_entry->qp.consume_size !=
                            produce_size ||
                            queue_pair_entry->qp.flags !=
                            (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
                                pr_devel("Error mismatched queue pair in local attach\n");
                                result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
                                goto error_keep_entry;
                        }

                        /*
                         * Do a local attach.  We swap the consume and
                         * produce queues for the attacher and deliver
                         * an attach event.
                         */
                        result = qp_notify_peer_local(true, *handle);
                        if (result < VMCI_SUCCESS)
                                goto error_keep_entry;

                        my_produce_q = queue_pair_entry->consume_q;
                        my_consume_q = queue_pair_entry->produce_q;
                        goto out;
                }

                result = VMCI_ERROR_ALREADY_EXISTS;
                goto error_keep_entry;
        }

        my_produce_q = qp_alloc_queue(produce_size, flags);
        if (!my_produce_q) {
                pr_warn("Error allocating pages for produce queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        my_consume_q = qp_alloc_queue(consume_size, flags);
        if (!my_consume_q) {
                pr_warn("Error allocating pages for consume queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
                                                    produce_size, consume_size,
                                                    my_produce_q, my_consume_q);
        if (!queue_pair_entry) {
                pr_warn("Error allocating memory in %s\n", __func__);
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
                                  num_consume_pages,
                                  &queue_pair_entry->ppn_set);
        if (result < VMCI_SUCCESS) {
                pr_warn("qp_alloc_ppn_set failed\n");
                goto error;
        }

        /*
         * It's only necessary to notify the host if this queue pair will be
         * attached to from another context.
         */
        if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                /* Local create case. */
                u32 context_id = vmci_get_context_id();

                /*
                 * Enforce similar checks on local queue pairs as we
                 * do for regular ones.  The handle's context must
                 * match the creator or attacher context id (here they
                 * are both the current context id) and the
                 * attach-only flag cannot exist during create.  We
                 * also ensure specified peer is this context or an
                 * invalid one.
                 */
                if (queue_pair_entry->qp.handle.context != context_id ||
                    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
                     queue_pair_entry->qp.peer != context_id)) {
                        result = VMCI_ERROR_NO_ACCESS;
                        goto error;
                }

                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
                        result = VMCI_ERROR_NOT_FOUND;
                        goto error;
                }
        } else {
                result = qp_alloc_hypercall(queue_pair_entry);
                if (result < VMCI_SUCCESS) {
                        pr_warn("qp_alloc_hypercall result = %d\n", result);
                        goto error;
                }
        }

        qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
                            (struct vmci_queue *)my_consume_q);

        qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
        queue_pair_entry->qp.ref_count++;
        *handle = queue_pair_entry->qp.handle;
        *produce_q = (struct vmci_queue *)my_produce_q;
        *consume_q = (struct vmci_queue *)my_consume_q;

        /*
         * We should initialize the queue pair header pages on a local
         * queue pair create.  For non-local queue pairs, the
         * hypervisor initializes the header pages in the create step.
         */
        if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
            queue_pair_entry->qp.ref_count == 1) {
                vmci_q_header_init((*produce_q)->q_header, *handle);
                vmci_q_header_init((*consume_q)->q_header, *handle);
        }

        mutex_unlock(&qp_guest_endpoints.mutex);

        return VMCI_SUCCESS;

 error:
        mutex_unlock(&qp_guest_endpoints.mutex);
        if (queue_pair_entry) {
                /* The queues will be freed inside the destroy routine. */
                qp_guest_endpoint_destroy(queue_pair_entry);
        } else {
                qp_free_queue(my_produce_q, produce_size);
                qp_free_queue(my_consume_q, consume_size);
        }
        return result;

 error_keep_entry:
        /* This path should only be used when an existing entry was found. */
        mutex_unlock(&qp_guest_endpoints.mutex);
        return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data, struct qp_broker_entry **ent)
{
        struct qp_broker_entry *entry = NULL;
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;
        u64 guest_produce_size;
        u64 guest_consume_size;

        /* Do not create if the caller asked not to. */
        if (flags & VMCI_QPFLAG_ATTACH_ONLY)
                return VMCI_ERROR_NOT_FOUND;

        /*
         * Creator's context ID should match handle's context ID or the creator
         * must allow the context in handle's context ID as the "peer".
         */
        if (handle.context != context_id && handle.context != peer)
                return VMCI_ERROR_NO_ACCESS;

        if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
                return VMCI_ERROR_DST_UNREACHABLE;

        /*
         * Creator's context ID for local queue pairs should match the
         * peer, if a peer is specified.
         */
        if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
                return VMCI_ERROR_NO_ACCESS;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return VMCI_ERROR_NO_MEM;

        if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
                /*
                 * The queue pair broker entry stores values from the guest
                 * point of view, so a creating host side endpoint should swap
                 * produce and consume values -- unless it is a local queue
                 * pair, in which case no swapping is necessary, since the local
                 * attacher will swap queues.
                 */

                guest_produce_size = consume_size;
                guest_consume_size = produce_size;
        } else {
                guest_produce_size = produce_size;
                guest_consume_size = consume_size;
        }

        entry->qp.handle = handle;
        entry->qp.peer = peer;
        entry->qp.flags = flags;
        entry->qp.produce_size = guest_produce_size;
        entry->qp.consume_size = guest_consume_size;
        entry->qp.ref_count = 1;
        entry->create_id = context_id;
        entry->attach_id = VMCI_INVALID_ID;
        entry->state = VMCIQPB_NEW;
        entry->require_trusted_attach =
            !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
        entry->created_by_trusted =
            !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
        entry->vmci_page_files = false;
        entry->wakeup_cb = wakeup_cb;
        entry->client_data = client_data;
        entry->produce_q = qp_host_alloc_queue(guest_produce_size);
        if (entry->produce_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }
        entry->consume_q = qp_host_alloc_queue(guest_consume_size);
        if (entry->consume_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        qp_init_queue_mutex(entry->produce_q, entry->consume_q);

        INIT_LIST_HEAD(&entry->qp.list_item);

        if (is_local) {
                u8 *tmp;

                entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
                                           PAGE_SIZE, GFP_KERNEL);
                if (entry->local_mem == NULL) {
                        result = VMCI_ERROR_NO_MEM;
                        goto error;
                }
                entry->state = VMCIQPB_CREATED_MEM;
                entry->produce_q->q_header = entry->local_mem;
                tmp = (u8 *)entry->local_mem + PAGE_SIZE *
                    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
                entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
        } else if (page_store) {
                /*
                 * The VMX already initialized the queue pair headers, so no
                 * need for the kernel side to do that.
                 */
                result = qp_host_register_user_memory(page_store,
                                                      entry->produce_q,
                                                      entry->consume_q);
                if (result < VMCI_SUCCESS)
                        goto error;

                entry->state = VMCIQPB_CREATED_MEM;
        } else {
                /*
                 * A create without a page_store may be either a host
                 * side create (in which case we are waiting for the
                 * guest side to supply the memory) or an old style
                 * queue pair create (in which case we will expect a
                 * set page store call as the next step).
                 */
                entry->state = VMCIQPB_CREATED_NO_MEM;
        }

        qp_list_add_entry(&qp_broker_list, &entry->qp);
        if (ent != NULL)
                *ent = entry;

        /* Add to resource obj */
        result = vmci_resource_add(&entry->resource,
                                   VMCI_RESOURCE_TYPE_QPAIR_HOST,
                                   handle);
        if (result != VMCI_SUCCESS) {
                pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                        handle.context, handle.resource, result);
                goto error;
        }

        entry->qp.handle = vmci_resource_handle(&entry->resource);
        if (is_local) {
                vmci_q_header_init(entry->produce_q->q_header,
                                   entry->qp.handle);
                vmci_q_header_init(entry->consume_q->q_header,
                                   entry->qp.handle);
        }

        vmci_ctx_qp_create(context, entry->qp.handle);

        return VMCI_SUCCESS;

 error:
        if (entry != NULL) {
                qp_host_free_queue(entry->produce_q, guest_produce_size);
                qp_host_free_queue(entry->consume_q, guest_consume_size);
                kfree(entry);
        }

        return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM.  Returns the payload size of the datagram enqueued on
 * success, or an error code otherwise.
 */
static int qp_notify_peer(bool attach,
                          struct vmci_handle handle,
                          u32 my_id,
                          u32 peer_id)
{
        int rv;
        struct vmci_event_qp ev;

        if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
            peer_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        /*
         * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
         * number of pending events from the hypervisor to a given VM
         * otherwise a rogue VM could do an arbitrary number of attach
         * and detach operations causing memory pressure in the host
         * kernel.
         */

        ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event = attach ?
            VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.handle = handle;
        ev.payload.peer_id = my_id;

        rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
                                    &ev.msg.hdr, false);
        if (rv < VMCI_SUCCESS)
                pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
                        attach ? "ATTACH" : "DETACH", peer_id);

        return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store.  At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it.  For compatibility with
 * older VMX'en that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store.  In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data,
                            struct qp_broker_entry **ent)
{
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;

        if (entry->state != VMCIQPB_CREATED_NO_MEM &&
            entry->state != VMCIQPB_CREATED_MEM)
                return VMCI_ERROR_UNAVAILABLE;

        if (is_local) {
                if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
                    context_id != entry->create_id) {
                        return VMCI_ERROR_INVALID_ARGS;
                }
        } else if (context_id == entry->create_id ||
                   context_id == entry->attach_id) {
                return VMCI_ERROR_ALREADY_EXISTS;
        }

        if (VMCI_CONTEXT_IS_VM(context_id) &&
            VMCI_CONTEXT_IS_VM(entry->create_id))
                return VMCI_ERROR_DST_UNREACHABLE;

        /*
         * If we are attaching from a restricted context then the queuepair
         * must have been created by a trusted endpoint.
         */
        if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
            !entry->created_by_trusted)
                return VMCI_ERROR_NO_ACCESS;

        /*
         * If we are attaching to a queuepair that was created by a restricted
         * context then we must be trusted.
         */
        if (entry->require_trusted_attach &&
            (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
                return VMCI_ERROR_NO_ACCESS;

        /*
         * If the creator specifies VMCI_INVALID_ID in "peer" field, access
         * control check is not performed.
         */
        if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
                return VMCI_ERROR_NO_ACCESS;

        if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
                /*
                 * Do not attach if the caller doesn't support Host Queue Pairs
                 * and a host created this queue pair.
                 */
                if (!vmci_ctx_supports_host_qp(context))
                        return VMCI_ERROR_INVALID_RESOURCE;

        } else if (context_id == VMCI_HOST_CONTEXT_ID) {
                struct vmci_ctx *create_context;
                bool supports_host_qp;

                /*
                 * Do not attach a host to a user created queue pair if that
                 * user doesn't support host queue pair end points.
                 */
                create_context = vmci_ctx_get(entry->create_id);
                supports_host_qp = vmci_ctx_supports_host_qp(create_context);
                vmci_ctx_put(create_context);

                if (!supports_host_qp)
                        return VMCI_ERROR_INVALID_RESOURCE;
        }

        if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * The queue pair broker entry stores values from the guest
                 * point of view, so an attaching guest should match the values
                 * stored in the entry.
                 */
                if (entry->qp.produce_size != produce_size ||
                    entry->qp.consume_size != consume_size) {
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;
                }
        } else if (entry->qp.produce_size != consume_size ||
                   entry->qp.consume_size != produce_size) {
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;
        }

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * If a guest attached to a queue pair, it will supply
                 * the backing memory.  If this is a pre-NOVMVM VMX,
                 * the backing memory will be supplied by calling
                 * vmci_qp_broker_set_page_store() following the
                 * return of the vmci_qp_broker_alloc() call.  If it is
                 * a VMX of version NOVMVM or later, the page store
                 * must be supplied as part of the
                 * vmci_qp_broker_alloc call.  Under all circumstances,
                 * the initially created queue pair must not have any
                 * memory associated with it already.
                 */
                if (entry->state != VMCIQPB_CREATED_NO_MEM)
                        return VMCI_ERROR_INVALID_ARGS;

                if (page_store != NULL) {
                        /*
                         * Patch up host state to point to guest
                         * supplied memory.  The VMX already
                         * initialized the queue pair headers, so no
                         * need for the kernel side to do that.
                         */
                        result = qp_host_register_user_memory(page_store,
                                                              entry->produce_q,
                                                              entry->consume_q);
                        if (result < VMCI_SUCCESS)
                                return result;

                        entry->state = VMCIQPB_ATTACHED_MEM;
                } else {
                        entry->state = VMCIQPB_ATTACHED_NO_MEM;
                }
        } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
                /*
                 * The host side is attempting to attach to a queue
                 * pair that doesn't have any memory associated with
                 * it.  This must be a pre-NOVMVM VMX that hasn't set
                 * the page store information yet, or a quiesced VM.
                 */
                return VMCI_ERROR_UNAVAILABLE;
        } else {
                /* The host side has successfully attached to a queue pair. */
                entry->state = VMCIQPB_ATTACHED_MEM;
        }

        if (entry->state == VMCIQPB_ATTACHED_MEM) {
                result =
                    qp_notify_peer(true, entry->qp.handle, context_id,
                                   entry->create_id);
                if (result < VMCI_SUCCESS)
                        pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
                                entry->create_id, entry->qp.handle.context,
                                entry->qp.handle.resource);
        }

        entry->attach_id = context_id;
        entry->qp.ref_count++;
        if (wakeup_cb) {
                entry->wakeup_cb = wakeup_cb;
                entry->client_data = client_data;
        }

        /*
         * When attaching to local queue pairs, the context already has
         * an entry tracking the queue pair, so don't add another one.
         */
        if (!is_local)
                vmci_ctx_qp_create(context, entry->qp.handle);

        if (ent != NULL)
                *ent = entry;

        return VMCI_SUCCESS;
}

1759/*
1760 * Queue pair alloc for use when setting up queue pair endpoints
1761 * on the host.
1762 */
1763static int qp_broker_alloc(struct vmci_handle handle,
1764 u32 peer,
1765 u32 flags,
1766 u32 priv_flags,
1767 u64 produce_size,
1768 u64 consume_size,
1769 struct vmci_qp_page_store *page_store,
1770 struct vmci_ctx *context,
1771 vmci_event_release_cb wakeup_cb,
1772 void *client_data,
1773 struct qp_broker_entry **ent,
1774 bool *swap)
1775{
1776 const u32 context_id = vmci_ctx_get_id(context);
1777 bool create;
1778 struct qp_broker_entry *entry = NULL;
1779 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1780 int result;
1781
1782 if (vmci_handle_is_invalid(handle) ||
1783 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1784 !(produce_size || consume_size) ||
1785 !context || context_id == VMCI_INVALID_ID ||
1786 handle.context == VMCI_INVALID_ID) {
1787 return VMCI_ERROR_INVALID_ARGS;
1788 }
1789
1790 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1791 return VMCI_ERROR_INVALID_ARGS;
1792
1793 /*
1794 * In the initial argument check, we ensure that non-vmkernel hosts
1795 * are not allowed to create local queue pairs.
1796 */
1797
1798 mutex_lock(&qp_broker_list.mutex);
1799
1800 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1801 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1802 context_id, handle.context, handle.resource);
1803 mutex_unlock(&qp_broker_list.mutex);
1804 return VMCI_ERROR_ALREADY_EXISTS;
1805 }
1806
1807 if (handle.resource != VMCI_INVALID_ID)
1808 entry = qp_broker_handle_to_entry(handle);
1809
1810 if (!entry) {
1811 create = true;
1812 result =
1813 qp_broker_create(handle, peer, flags, priv_flags,
1814 produce_size, consume_size, page_store,
1815 context, wakeup_cb, client_data, ent);
1816 } else {
1817 create = false;
1818 result =
1819 qp_broker_attach(entry, peer, flags, priv_flags,
1820 produce_size, consume_size, page_store,
1821 context, wakeup_cb, client_data, ent);
1822 }
1823
1824 mutex_unlock(&qp_broker_list.mutex);
1825
1826 if (swap)
1827 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1828 !(create && is_local);
1829
1830 return result;
1831}
1832
1833/*
1834 * This function implements the kernel API for allocating a queue
1835 * pair.
1836 */
1837static int qp_alloc_host_work(struct vmci_handle *handle,
1838 struct vmci_queue **produce_q,
1839 u64 produce_size,
1840 struct vmci_queue **consume_q,
1841 u64 consume_size,
1842 u32 peer,
1843 u32 flags,
1844 u32 priv_flags,
1845 vmci_event_release_cb wakeup_cb,
1846 void *client_data)
1847{
1848 struct vmci_handle new_handle;
1849 struct vmci_ctx *context;
1850 struct qp_broker_entry *entry;
1851 int result;
1852 bool swap;
1853
1854 if (vmci_handle_is_invalid(*handle)) {
1855 new_handle = vmci_make_handle(
1856 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1857 } else
1858 new_handle = *handle;
1859
1860 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1861 entry = NULL;
1862 result =
1863 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1864 produce_size, consume_size, NULL, context,
1865 wakeup_cb, client_data, &entry, &swap);
1866 if (result == VMCI_SUCCESS) {
1867 if (swap) {
1868 /*
1869 * If this is a local queue pair, the attacher
1870 * will swap around produce and consume
1871 * queues.
1872 */
1873
1874 *produce_q = entry->consume_q;
1875 *consume_q = entry->produce_q;
1876 } else {
1877 *produce_q = entry->produce_q;
1878 *consume_q = entry->consume_q;
1879 }
1880
1881 *handle = vmci_resource_handle(&entry->resource);
1882 } else {
1883 *handle = VMCI_INVALID_HANDLE;
1884 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1885 result);
1886 }
1887 vmci_ctx_put(context);
1888 return result;
1889}
1890
1891/*
1892 * Allocates a VMCI queue_pair. Only checks validity of input
1893 * arguments. The real work is done in the host or guest
1894 * specific function.
1895 */
1896int vmci_qp_alloc(struct vmci_handle *handle,
1897 struct vmci_queue **produce_q,
1898 u64 produce_size,
1899 struct vmci_queue **consume_q,
1900 u64 consume_size,
1901 u32 peer,
1902 u32 flags,
1903 u32 priv_flags,
1904 bool guest_endpoint,
1905 vmci_event_release_cb wakeup_cb,
1906 void *client_data)
1907{
1908 if (!handle || !produce_q || !consume_q ||
1909 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1910 return VMCI_ERROR_INVALID_ARGS;
1911
1912 if (guest_endpoint) {
1913 return qp_alloc_guest_work(handle, produce_q,
1914 produce_size, consume_q,
1915 consume_size, peer,
1916 flags, priv_flags);
1917 } else {
1918 return qp_alloc_host_work(handle, produce_q,
1919 produce_size, consume_q,
1920 consume_size, peer, flags,
1921 priv_flags, wakeup_cb, client_data);
1922 }
1923}
1924
1925/*
1926 * This function implements the host kernel API for detaching from
1927 * a queue pair.
1928 */
1929static int qp_detatch_host_work(struct vmci_handle handle)
1930{
1931 int result;
1932 struct vmci_ctx *context;
1933
1934 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1935
1936 result = vmci_qp_broker_detach(handle, context);
1937
1938 vmci_ctx_put(context);
1939 return result;
1940}
1941
1942/*
1943 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1944 * Real work is done in the host or guest specific function.
1945 */
1946static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1947{
1948 if (vmci_handle_is_invalid(handle))
1949 return VMCI_ERROR_INVALID_ARGS;
1950
1951 if (guest_endpoint)
1952 return qp_detatch_guest_work(handle);
1953 else
1954 return qp_detatch_host_work(handle);
1955}
1956
1957/*
1958 * Returns the entry from the head of the list. Assumes that the list is
1959 * locked.
1960 */
1961static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1962{
1963 if (!list_empty(&qp_list->head)) {
1964 struct qp_entry *entry =
1965 list_first_entry(&qp_list->head, struct qp_entry,
1966 list_item);
1967 return entry;
1968 }
1969
1970 return NULL;
1971}
1972
1973void vmci_qp_broker_exit(void)
1974{
1975 struct qp_entry *entry;
1976 struct qp_broker_entry *be;
1977
1978 mutex_lock(&qp_broker_list.mutex);
1979
1980 while ((entry = qp_list_get_head(&qp_broker_list))) {
1981 be = (struct qp_broker_entry *)entry;
1982
1983 qp_list_remove_entry(&qp_broker_list, entry);
1984 kfree(be);
1985 }
1986
1987 mutex_unlock(&qp_broker_list.mutex);
1988}
1989
1990/*
1991 * Requests that a queue pair be allocated with the VMCI queue
1992 * pair broker. Allocates a queue pair entry if one does not
1993 * exist. Attaches to one if it exists, and retrieves the page
1994 * files backing that queue_pair. Assumes that the queue pair
1995 * broker lock is held.
1996 */
1997int vmci_qp_broker_alloc(struct vmci_handle handle,
1998 u32 peer,
1999 u32 flags,
2000 u32 priv_flags,
2001 u64 produce_size,
2002 u64 consume_size,
2003 struct vmci_qp_page_store *page_store,
2004 struct vmci_ctx *context)
2005{
2006 return qp_broker_alloc(handle, peer, flags, priv_flags,
2007 produce_size, consume_size,
2008 page_store, context, NULL, NULL, NULL, NULL);
2009}
2010
2011/*
2012 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
2013 * step to add the UVAs of the VMX mapping of the queue pair. This function
2014 * provides backwards compatibility with such VMX'en, and takes care of
2015 * registering the page store for a queue pair previously allocated by the
2016 * VMX during create or attach. This function will move the queue pair state
2017 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
2018 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
2019 * attached state with memory, the queue pair is ready to be used by the
2020 * host peer, and an attached event will be generated.
2021 *
2022 * Assumes that the queue pair broker lock is held.
2023 *
2024 * This function is only used by the hosted platform, since there is no
2025 * issue with backwards compatibility for vmkernel.
2026 */
2027int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2028 u64 produce_uva,
2029 u64 consume_uva,
2030 struct vmci_ctx *context)
2031{
2032 struct qp_broker_entry *entry;
2033 int result;
2034 const u32 context_id = vmci_ctx_get_id(context);
2035
2036 if (vmci_handle_is_invalid(handle) || !context ||
2037 context_id == VMCI_INVALID_ID)
2038 return VMCI_ERROR_INVALID_ARGS;
2039
2040 /*
2041 * We only support guest to host queue pairs, so the VMX must
2042 * supply UVAs for the mapped page files.
2043 */
2044
2045 if (produce_uva == 0 || consume_uva == 0)
2046 return VMCI_ERROR_INVALID_ARGS;
2047
2048 mutex_lock(&qp_broker_list.mutex);
2049
2050 if (!vmci_ctx_qp_exists(context, handle)) {
2051 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2052 context_id, handle.context, handle.resource);
2053 result = VMCI_ERROR_NOT_FOUND;
2054 goto out;
2055 }
2056
2057 entry = qp_broker_handle_to_entry(handle);
2058 if (!entry) {
2059 result = VMCI_ERROR_NOT_FOUND;
2060 goto out;
2061 }
2062
2063 /*
2064 * If I'm the owner then I can set the page store.
2065 *
2066 * Or, if a host created the queue_pair and I'm the attached peer
2067 * then I can set the page store.
2068 */
2069 if (entry->create_id != context_id &&
2070 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2071 entry->attach_id != context_id)) {
2072 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2073 goto out;
2074 }
2075
2076 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2077 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2078 result = VMCI_ERROR_UNAVAILABLE;
2079 goto out;
2080 }
2081
2082 result = qp_host_get_user_memory(produce_uva, consume_uva,
2083 entry->produce_q, entry->consume_q);
2084 if (result < VMCI_SUCCESS)
2085 goto out;
2086
2087 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2088 if (result < VMCI_SUCCESS) {
2089 qp_host_unregister_user_memory(entry->produce_q,
2090 entry->consume_q);
2091 goto out;
2092 }
2093
2094 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2095 entry->state = VMCIQPB_CREATED_MEM;
2096 else
2097 entry->state = VMCIQPB_ATTACHED_MEM;
2098
2099 entry->vmci_page_files = true;
2100
2101 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2102 result =
2103 qp_notify_peer(true, handle, context_id, entry->create_id);
2104 if (result < VMCI_SUCCESS) {
2105 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2106 entry->create_id, entry->qp.handle.context,
2107 entry->qp.handle.resource);
2108 }
2109 }
2110
2111 result = VMCI_SUCCESS;
2112 out:
2113 mutex_unlock(&qp_broker_list.mutex);
2114 return result;
2115}
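
/*
 * Illustrative sketch only (not a call sequence taken verbatim from this
 * file): an old-style (pre-NOVMVM) hosted setup would first allocate
 * through the broker without a page store and then register the UVAs in
 * the second step above. The handle, peer, sizes and UVAs below are
 * hypothetical placeholders, with the no-flags defaults for flags and
 * priv_flags.
 *
 *	result = vmci_qp_broker_alloc(handle, peer, 0, VMCI_NO_PRIVILEGE_FLAGS,
 *				      PAGE_SIZE, PAGE_SIZE, NULL, context);
 *	if (result == VMCI_SUCCESS)
 *		result = vmci_qp_broker_set_page_store(handle, produce_uva,
 *						       consume_uva, context);
 */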
2116
2117/*
2118 * Resets saved queue headers for the given QP broker
2119 * entry. Should be used when guest memory becomes available
2120 * again, or the guest detaches.
2121 */
2122static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2123{
2124 entry->produce_q->saved_header = NULL;
2125 entry->consume_q->saved_header = NULL;
2126}
2127
2128/*
2129 * The main entry point for detaching from a queue pair registered with the
2130 * queue pair broker. If more than one endpoint is attached to the queue
2131 * pair, the first endpoint will mainly decrement a reference count and
2132 * generate a notification to its peer. The last endpoint will clean up
2133 * the queue pair state registered with the broker.
2134 *
2135 * When a guest endpoint detaches, it will unmap and unregister the guest
2136 * memory backing the queue pair. If the host is still attached, it will
2137 * no longer be able to access the queue pair content.
2138 *
2139 * If the queue pair is already in a state where there is no memory
2140 * registered for the queue pair (any *_NO_MEM state), it will transition to
2141 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
2142 * endpoint is the first of two endpoints to detach. If the host endpoint is
2143 * the first of the two to detach, the queue pair will move to the
2144 * VMCIQPB_SHUTDOWN_MEM state.
2145 */
2146int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2147{
2148 struct qp_broker_entry *entry;
2149 const u32 context_id = vmci_ctx_get_id(context);
2150 u32 peer_id;
2151 bool is_local = false;
2152 int result;
2153
2154 if (vmci_handle_is_invalid(handle) || !context ||
2155 context_id == VMCI_INVALID_ID) {
2156 return VMCI_ERROR_INVALID_ARGS;
2157 }
2158
2159 mutex_lock(&qp_broker_list.mutex);
2160
2161 if (!vmci_ctx_qp_exists(context, handle)) {
2162 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2163 context_id, handle.context, handle.resource);
2164 result = VMCI_ERROR_NOT_FOUND;
2165 goto out;
2166 }
2167
2168 entry = qp_broker_handle_to_entry(handle);
2169 if (!entry) {
2170 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
2171 context_id, handle.context, handle.resource);
2172 result = VMCI_ERROR_NOT_FOUND;
2173 goto out;
2174 }
2175
2176 if (context_id != entry->create_id && context_id != entry->attach_id) {
2177 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2178 goto out;
2179 }
2180
2181 if (context_id == entry->create_id) {
2182 peer_id = entry->attach_id;
2183 entry->create_id = VMCI_INVALID_ID;
2184 } else {
2185 peer_id = entry->create_id;
2186 entry->attach_id = VMCI_INVALID_ID;
2187 }
2188 entry->qp.ref_count--;
2189
2190 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2191
2192 if (context_id != VMCI_HOST_CONTEXT_ID) {
2193 bool headers_mapped;
2194
2195 /*
2196 * Pre NOVMVM vmx'en may detach from a queue pair
2197 * before setting the page store, and in that case
2198 * there is no user memory to detach from. Also, more
2199 * recent VMX'en may detach from a queue pair in the
2200 * quiesced state.
2201 */
2202
2203 qp_acquire_queue_mutex(entry->produce_q);
2204 headers_mapped = entry->produce_q->q_header ||
2205 entry->consume_q->q_header;
2206 if (QPBROKERSTATE_HAS_MEM(entry)) {
2207 result =
2208 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2209 entry->produce_q,
2210 entry->consume_q);
2211 if (result < VMCI_SUCCESS)
2212 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2213 handle.context, handle.resource,
2214 result);
2215
2216 qp_host_unregister_user_memory(entry->produce_q,
2217 entry->consume_q);
2218
2225 }
2226
2227 if (!headers_mapped)
2228 qp_reset_saved_headers(entry);
2229
2230 qp_release_queue_mutex(entry->produce_q);
2231
2232 if (!headers_mapped && entry->wakeup_cb)
2233 entry->wakeup_cb(entry->client_data);
2234
2235 } else {
2236 if (entry->wakeup_cb) {
2237 entry->wakeup_cb = NULL;
2238 entry->client_data = NULL;
2239 }
2240 }
2241
2242 if (entry->qp.ref_count == 0) {
2243 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2244
2245 if (is_local)
2246 kfree(entry->local_mem);
2247
2248 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2249 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2250 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2251 /* Unlink from resource hash table and free callback */
2252 vmci_resource_remove(&entry->resource);
2253
2254 kfree(entry);
2255
2256 vmci_ctx_qp_destroy(context, handle);
2257 } else {
2258 qp_notify_peer(false, handle, context_id, peer_id);
2259 if (context_id == VMCI_HOST_CONTEXT_ID &&
2260 QPBROKERSTATE_HAS_MEM(entry)) {
2261 entry->state = VMCIQPB_SHUTDOWN_MEM;
2262 } else {
2263 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2264 }
2265
2266 if (!is_local)
2267 vmci_ctx_qp_destroy(context, handle);
2268
2269 }
2270 result = VMCI_SUCCESS;
2271 out:
2272 mutex_unlock(&qp_broker_list.mutex);
2273 return result;
2274}
2275
2276/*
2277 * Establishes the necessary mappings for a queue pair given a
2278 * reference to the queue pair guest memory. This is usually
2279 * called when a guest is unquiesced and the VMX is allowed to
2280 * map guest memory once again.
2281 */
2282int vmci_qp_broker_map(struct vmci_handle handle,
2283 struct vmci_ctx *context,
2284 u64 guest_mem)
2285{
2286 struct qp_broker_entry *entry;
2287 const u32 context_id = vmci_ctx_get_id(context);
2288 bool is_local = false;
2289 int result;
2290
2291 if (vmci_handle_is_invalid(handle) || !context ||
2292 context_id == VMCI_INVALID_ID)
2293 return VMCI_ERROR_INVALID_ARGS;
2294
2295 mutex_lock(&qp_broker_list.mutex);
2296
2297 if (!vmci_ctx_qp_exists(context, handle)) {
2298 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2299 context_id, handle.context, handle.resource);
2300 result = VMCI_ERROR_NOT_FOUND;
2301 goto out;
2302 }
2303
2304 entry = qp_broker_handle_to_entry(handle);
2305 if (!entry) {
2306 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2307 context_id, handle.context, handle.resource);
2308 result = VMCI_ERROR_NOT_FOUND;
2309 goto out;
2310 }
2311
2312 if (context_id != entry->create_id && context_id != entry->attach_id) {
2313 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2314 goto out;
2315 }
2316
2317 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2318 result = VMCI_SUCCESS;
2319
2320 if (context_id != VMCI_HOST_CONTEXT_ID) {
2321 struct vmci_qp_page_store page_store;
2322
2323 page_store.pages = guest_mem;
2324 page_store.len = QPE_NUM_PAGES(entry->qp);
2325
2326 qp_acquire_queue_mutex(entry->produce_q);
2327 qp_reset_saved_headers(entry);
2328 result =
2329 qp_host_register_user_memory(&page_store,
2330 entry->produce_q,
2331 entry->consume_q);
2332 qp_release_queue_mutex(entry->produce_q);
2333 if (result == VMCI_SUCCESS) {
2334 /* Move state from *_NO_MEM to *_MEM */
2335
2336 entry->state++;
2337
2338 if (entry->wakeup_cb)
2339 entry->wakeup_cb(entry->client_data);
2340 }
2341 }
2342
2343 out:
2344 mutex_unlock(&qp_broker_list.mutex);
2345 return result;
2346}
2347
2348/*
2349 * Saves a snapshot of the queue headers for the given QP broker
2350 * entry. Should be used when guest memory is unmapped.
2351 * Results:
2352 * VMCI_SUCCESS on success, appropriate error code if guest memory
2353 * can't be accessed.
2354 */
2355static int qp_save_headers(struct qp_broker_entry *entry)
2356{
2357 int result;
2358
2359 if (entry->produce_q->saved_header != NULL &&
2360 entry->consume_q->saved_header != NULL) {
2361 /*
2362 * If the headers have already been saved, we don't need to do
2363 * it again, and we don't want to map in the headers
2364 * unnecessarily.
2365 */
2366
2367 return VMCI_SUCCESS;
2368 }
2369
2370 if (NULL == entry->produce_q->q_header ||
2371 NULL == entry->consume_q->q_header) {
2372 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2373 if (result < VMCI_SUCCESS)
2374 return result;
2375 }
2376
2377 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2378 sizeof(entry->saved_produce_q));
2379 entry->produce_q->saved_header = &entry->saved_produce_q;
2380 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2381 sizeof(entry->saved_consume_q));
2382 entry->consume_q->saved_header = &entry->saved_consume_q;
2383
2384 return VMCI_SUCCESS;
2385}
2386
2387/*
2388 * Removes all references to the guest memory of a given queue pair, and
2389 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2390 * called when a VM is being quiesced and access to guest memory should
2391 * be avoided.
2392 */
2393int vmci_qp_broker_unmap(struct vmci_handle handle,
2394 struct vmci_ctx *context,
2395 u32 gid)
2396{
2397 struct qp_broker_entry *entry;
2398 const u32 context_id = vmci_ctx_get_id(context);
2399 bool is_local = false;
2400 int result;
2401
2402 if (vmci_handle_is_invalid(handle) || !context ||
2403 context_id == VMCI_INVALID_ID)
2404 return VMCI_ERROR_INVALID_ARGS;
2405
2406 mutex_lock(&qp_broker_list.mutex);
2407
2408 if (!vmci_ctx_qp_exists(context, handle)) {
2409 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2410 context_id, handle.context, handle.resource);
2411 result = VMCI_ERROR_NOT_FOUND;
2412 goto out;
2413 }
2414
2415 entry = qp_broker_handle_to_entry(handle);
2416 if (!entry) {
2417 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2418 context_id, handle.context, handle.resource);
2419 result = VMCI_ERROR_NOT_FOUND;
2420 goto out;
2421 }
2422
2423 if (context_id != entry->create_id && context_id != entry->attach_id) {
2424 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2425 goto out;
2426 }
2427
2428 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2429
2430 if (context_id != VMCI_HOST_CONTEXT_ID) {
2431 qp_acquire_queue_mutex(entry->produce_q);
2432 result = qp_save_headers(entry);
2433 if (result < VMCI_SUCCESS)
2434 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2435 handle.context, handle.resource, result);
2436
2437 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2438
2439 /*
2440 * On hosted, when we unmap queue pairs, the VMX will also
2441 * unmap the guest memory, so we invalidate the previously
2442 * registered memory. If the queue pair is mapped again at a
2443 * later point in time, we will need to reregister the user
2444 * memory with a possibly new user VA.
2445 */
2446 qp_host_unregister_user_memory(entry->produce_q,
2447 entry->consume_q);
2448
2449 /*
2450 * Move state from *_MEM to *_NO_MEM.
2451 */
2452 entry->state--;
2453
2454 qp_release_queue_mutex(entry->produce_q);
2455 }
2456
2457 result = VMCI_SUCCESS;
2458
2459 out:
2460 mutex_unlock(&qp_broker_list.mutex);
2461 return result;
2462}
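
/*
 * Note on the entry->state-- above (and the matching entry->state++ in
 * vmci_qp_broker_map()): this relies on each VMCIQPB_*_NO_MEM state
 * being defined adjacent to its *_MEM counterpart, so that a single
 * decrement or increment toggles a state between "memory registered"
 * and "no memory registered" without changing which phase
 * (created/attached/shutdown) the queue pair is in.
 */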
2463
2464/*
2465 * Destroys all guest queue pair endpoints. If active guest queue
2466 * pairs still exist, hypercalls to attempt detach from these
2467 * queue pairs will be made. Any failure to detach is silently
2468 * ignored.
2469 */
2470void vmci_qp_guest_endpoints_exit(void)
2471{
2472 struct qp_entry *entry;
2473 struct qp_guest_endpoint *ep;
2474
2475 mutex_lock(&qp_guest_endpoints.mutex);
2476
2477 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2478 ep = (struct qp_guest_endpoint *)entry;
2479
2480 /* Don't make a hypercall for local queue_pairs. */
2481 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2482 qp_detatch_hypercall(entry->handle);
2483
2484 /* We cannot fail the exit, so let's reset ref_count. */
2485 entry->ref_count = 0;
2486 qp_list_remove_entry(&qp_guest_endpoints, entry);
2487
2488 qp_guest_endpoint_destroy(ep);
2489 }
2490
2491 mutex_unlock(&qp_guest_endpoints.mutex);
2492}
2493
2494/*
2495 * Helper routine that will lock the queue pair before subsequent
2496 * operations.
2497 * Note: Non-blocking on the host side is currently only implemented in ESX.
2498 * Since non-blocking isn't yet implemented on the host personality, we
2499 * have no reason to acquire a spin lock. So to avoid the use of an
2500 * unnecessary lock, only acquire the mutex if we can block.
2501 */
2502static void qp_lock(const struct vmci_qp *qpair)
2503{
2504 qp_acquire_queue_mutex(qpair->produce_q);
2505}
2506
2507/*
2508 * Helper routine that unlocks the queue pair after calling
2509 * qp_lock.
2510 */
2511static void qp_unlock(const struct vmci_qp *qpair)
2512{
2513 qp_release_queue_mutex(qpair->produce_q);
2514}
2515
2516/*
2517 * The queue headers may not be mapped at all times. If a queue is
2518 * currently not mapped, it will be attempted to do so.
2519 */
2520static int qp_map_queue_headers(struct vmci_queue *produce_q,
2521 struct vmci_queue *consume_q)
2522{
2523 int result;
2524
2525 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2526 result = qp_host_map_queues(produce_q, consume_q);
2527 if (result < VMCI_SUCCESS)
2528 return (produce_q->saved_header &&
2529 consume_q->saved_header) ?
2530 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2531 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2532 }
2533
2534 return VMCI_SUCCESS;
2535}
2536
2537/*
2538 * Helper routine that will retrieve the produce and consume
2539 * headers of a given queue pair. If the guest memory of the
2540 * queue pair is currently not available, the saved queue headers
2541 * will be returned, if these are available.
2542 */
2543static int qp_get_queue_headers(const struct vmci_qp *qpair,
2544 struct vmci_queue_header **produce_q_header,
2545 struct vmci_queue_header **consume_q_header)
2546{
2547 int result;
2548
2549 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2550 if (result == VMCI_SUCCESS) {
2551 *produce_q_header = qpair->produce_q->q_header;
2552 *consume_q_header = qpair->consume_q->q_header;
2553 } else if (qpair->produce_q->saved_header &&
2554 qpair->consume_q->saved_header) {
2555 *produce_q_header = qpair->produce_q->saved_header;
2556 *consume_q_header = qpair->consume_q->saved_header;
2557 result = VMCI_SUCCESS;
2558 }
2559
2560 return result;
2561}
2562
2563/*
2564 * Callback from VMCI queue pair broker indicating that a queue
2565 * pair that was previously not ready, now either is ready or
2566 * gone forever.
2567 */
2568static int qp_wakeup_cb(void *client_data)
2569{
2570 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2571
2572 qp_lock(qpair);
2573 while (qpair->blocked > 0) {
2574 qpair->blocked--;
2575 qpair->generation++;
2576 wake_up(&qpair->event);
2577 }
2578 qp_unlock(qpair);
2579
2580 return VMCI_SUCCESS;
2581}
2582
2583/*
2584 * Makes the calling thread wait for the queue pair to become
2585 * ready for host side access. Returns true when thread is
2586 * woken up after queue pair state change, false otherwise.
2587 */
2588static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2589{
2590 unsigned int generation;
2591
2592 qpair->blocked++;
2593 generation = qpair->generation;
2594 qp_unlock(qpair);
2595 wait_event(qpair->event, generation != qpair->generation);
2596 qp_lock(qpair);
2597
2598 return true;
2599}
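
/*
 * The generation counter makes this wait robust against spurious
 * wakeups: qp_wakeup_cb() only bumps qpair->generation while releasing
 * blocked waiters, so the wait_event() above returns only once the
 * queue pair has actually changed state since the waiter recorded its
 * generation under the queue pair lock.
 */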
2600
2601/*
2602 * Enqueues a given buffer to the produce queue using the provided
2603 * function. As many bytes as possible (space available in the queue)
2604 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2605 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2606 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2607 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2608 * an error occurred when accessing the buffer,
2609 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2610 * available. Otherwise, the number of bytes written to the queue is
2611 * returned. Updates the tail pointer of the produce queue.
2612 */
2613static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2614 struct vmci_queue *consume_q,
2615 const u64 produce_q_size,
2616 const void *buf,
2617 size_t buf_size,
2618 vmci_memcpy_to_queue_func memcpy_to_queue)
2619{
2620 s64 free_space;
2621 u64 tail;
2622 size_t written;
2623 ssize_t result;
2624
2625 result = qp_map_queue_headers(produce_q, consume_q);
2626 if (unlikely(result != VMCI_SUCCESS))
2627 return result;
2628
2629 free_space = vmci_q_header_free_space(produce_q->q_header,
2630 consume_q->q_header,
2631 produce_q_size);
2632 if (free_space == 0)
2633 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2634
2635 if (free_space < VMCI_SUCCESS)
2636 return (ssize_t) free_space;
2637
2638 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2639 tail = vmci_q_header_producer_tail(produce_q->q_header);
2640 if (likely(tail + written < produce_q_size)) {
2641 result = memcpy_to_queue(produce_q, tail, buf, 0, written);
2642 } else {
2643 /* Tail pointer wraps around. */
2644
2645 const size_t tmp = (size_t) (produce_q_size - tail);
2646
2647 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
2648 if (result >= VMCI_SUCCESS)
2649 result = memcpy_to_queue(produce_q, 0, buf, tmp,
2650 written - tmp);
2651 }
2652
2653 if (result < VMCI_SUCCESS)
2654 return result;
2655
2656 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2657 produce_q_size);
2658 return written;
2659}
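
/*
 * Worked example of the wrap-around case above, with illustrative
 * numbers only: for produce_q_size = 4096, tail = 4000 and
 * written = 200, the first memcpy_to_queue() copies
 * tmp = 4096 - 4000 = 96 bytes at offset 4000, the second copies the
 * remaining 200 - 96 = 104 bytes at offset 0, and the producer tail
 * then advances to (4000 + 200) % 4096 = 104.
 */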
2660
2661/*
2662 * Dequeues data (if available) from the given consume queue. Writes data
2663 * to the user provided buffer using the provided function.
2664 * Assumes the queue->mutex has been acquired.
2665 * Results:
2666 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2667 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2668 * (as defined by the queue size).
2669 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2670 * Otherwise the number of bytes dequeued is returned.
2671 * Side effects:
2672 * Updates the head pointer of the consume queue.
2673 */
2674static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2675 struct vmci_queue *consume_q,
2676 const u64 consume_q_size,
2677 void *buf,
2678 size_t buf_size,
2679 vmci_memcpy_from_queue_func memcpy_from_queue,
2680 bool update_consumer)
2681{
2682 s64 buf_ready;
2683 u64 head;
2684 size_t read;
2685 ssize_t result;
2686
2687 result = qp_map_queue_headers(produce_q, consume_q);
2688 if (unlikely(result != VMCI_SUCCESS))
2689 return result;
2690
2691 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2692 produce_q->q_header,
2693 consume_q_size);
2694 if (buf_ready == 0)
2695 return VMCI_ERROR_QUEUEPAIR_NODATA;
2696
2697 if (buf_ready < VMCI_SUCCESS)
2698 return (ssize_t) buf_ready;
2699
2700 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2701 head = vmci_q_header_consumer_head(produce_q->q_header);
2702 if (likely(head + read < consume_q_size)) {
2703 result = memcpy_from_queue(buf, 0, consume_q, head, read);
2704 } else {
2705 /* Head pointer wraps around. */
2706
2707 const size_t tmp = (size_t) (consume_q_size - head);
2708
2709 result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2710 if (result >= VMCI_SUCCESS)
2711 result = memcpy_from_queue(buf, tmp, consume_q, 0,
2712 read - tmp);
2713
2714 }
2715
2716 if (result < VMCI_SUCCESS)
2717 return result;
2718
2719 if (update_consumer)
2720 vmci_q_header_add_consumer_head(produce_q->q_header,
2721 read, consume_q_size);
2722
2723 return read;
2724}
2725
2726/*
2727 * vmci_qpair_alloc() - Allocates a queue pair.
2728 * @qpair: Pointer for the new vmci_qp struct.
2729 * @handle: Handle to track the resource.
2730 * @produce_qsize: Desired size of the producer queue.
2731 * @consume_qsize: Desired size of the consumer queue.
2732 * @peer: ContextID of the peer.
2733 * @flags: VMCI flags.
2734 * @priv_flags: VMCI privilege flags.
2735 *
2736 * This is the client interface for allocating the memory for a
2737 * vmci_qp structure and then attaching to the underlying
2738 * queue. If an error occurs allocating the memory for the
2739 * vmci_qp structure no attempt is made to attach. If an
2740 * error occurs attaching, then the structure is freed.
2741 */
2742int vmci_qpair_alloc(struct vmci_qp **qpair,
2743 struct vmci_handle *handle,
2744 u64 produce_qsize,
2745 u64 consume_qsize,
2746 u32 peer,
2747 u32 flags,
2748 u32 priv_flags)
2749{
2750 struct vmci_qp *my_qpair;
2751 int retval;
2752 struct vmci_handle src = VMCI_INVALID_HANDLE;
2753 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2754 enum vmci_route route;
2755 vmci_event_release_cb wakeup_cb;
2756 void *client_data;
2757
2758 /*
2759 * Restrict the size of a queuepair. The device already
2760 * enforces a limit on the total amount of memory that can be
2761 * allocated to queuepairs for a guest. However, we try to
2762 * allocate this memory before we make the queuepair
2763 * allocation hypercall. On Linux, we allocate each page
2764 * separately, which means rather than fail, the guest will
2765 * thrash while it tries to allocate, and will become
2766 * increasingly unresponsive to the point where it appears to
2767 * be hung. So we place a limit on the size of an individual
2768 * queuepair here, and leave the device to enforce the
2769 * restriction on total queuepair memory. (Note that this
2770 * doesn't prevent all cases; a user with only this much
2771 * physical memory could still get into trouble.) The error
2772 * used by the device is NO_RESOURCES, so use that here too.
2773 */
2774
2775 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2776 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2777 return VMCI_ERROR_NO_RESOURCES;
2778
2779 retval = vmci_route(&src, &dst, false, &route);
2780 if (retval < VMCI_SUCCESS)
2781 route = vmci_guest_code_active() ?
2782 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2783
2784 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2785 pr_devel("NONBLOCK OR PINNED set\n");
2786 return VMCI_ERROR_INVALID_ARGS;
2787 }
2788
2789 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2790 if (!my_qpair)
2791 return VMCI_ERROR_NO_MEM;
2792
2793 my_qpair->produce_q_size = produce_qsize;
2794 my_qpair->consume_q_size = consume_qsize;
2795 my_qpair->peer = peer;
2796 my_qpair->flags = flags;
2797 my_qpair->priv_flags = priv_flags;
2798
2799 wakeup_cb = NULL;
2800 client_data = NULL;
2801
2802 if (VMCI_ROUTE_AS_HOST == route) {
2803 my_qpair->guest_endpoint = false;
2804 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2805 my_qpair->blocked = 0;
2806 my_qpair->generation = 0;
2807 init_waitqueue_head(&my_qpair->event);
2808 wakeup_cb = qp_wakeup_cb;
2809 client_data = (void *)my_qpair;
2810 }
2811 } else {
2812 my_qpair->guest_endpoint = true;
2813 }
2814
2815 retval = vmci_qp_alloc(handle,
2816 &my_qpair->produce_q,
2817 my_qpair->produce_q_size,
2818 &my_qpair->consume_q,
2819 my_qpair->consume_q_size,
2820 my_qpair->peer,
2821 my_qpair->flags,
2822 my_qpair->priv_flags,
2823 my_qpair->guest_endpoint,
2824 wakeup_cb, client_data);
2825
2826 if (retval < VMCI_SUCCESS) {
2827 kfree(my_qpair);
2828 return retval;
2829 }
2830
2831 *qpair = my_qpair;
2832 my_qpair->handle = *handle;
2833
2834 return retval;
2835}
2836EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
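
/*
 * A minimal usage sketch for the client interface above; peer_cid and
 * the 4 KiB queue sizes are hypothetical, and error handling beyond the
 * allocation itself is elided.
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
 *			      0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 */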
2837
2838/*
2839 * vmci_qpair_detach() - Detaches the client from a queue pair.
2840 * @qpair: Reference of a pointer to the qpair struct.
2841 *
2842 * This is the client interface for detaching from a VMCIQPair.
2843 * Note that this routine will free the memory allocated for the
2844 * vmci_qp structure too.
2845 */
2846int vmci_qpair_detach(struct vmci_qp **qpair)
2847{
2848 int result;
2849 struct vmci_qp *old_qpair;
2850
2851 if (!qpair || !(*qpair))
2852 return VMCI_ERROR_INVALID_ARGS;
2853
2854 old_qpair = *qpair;
2855 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2856
2857 /*
2858 * The guest can fail to detach for a number of reasons, and
2859 * if it does so, it will cleanup the entry (if there is one).
2860 * The host can fail too, but it won't cleanup the entry
2861 * immediately, it will do that later when the context is
2862 * freed. Either way, we need to release the qpair struct
2863 * here; there isn't much the caller can do, and we don't want
2864 * to leak.
2865 */
2866
2867 memset(old_qpair, 0, sizeof(*old_qpair));
2868 old_qpair->handle = VMCI_INVALID_HANDLE;
2869 old_qpair->peer = VMCI_INVALID_ID;
2870 kfree(old_qpair);
2871 *qpair = NULL;
2872
2873 return result;
2874}
2875EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2876
2877/*
2878 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2879 * @qpair: Pointer to the queue pair struct.
2880 * @producer_tail: Reference used for storing producer tail index.
2881 * @consumer_head: Reference used for storing the consumer head index.
2882 *
2883 * This is the client interface for getting the current indexes of the
2884 * QPair from the point of view of the caller as the producer.
2885 */
2886int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2887 u64 *producer_tail,
2888 u64 *consumer_head)
2889{
2890 struct vmci_queue_header *produce_q_header;
2891 struct vmci_queue_header *consume_q_header;
2892 int result;
2893
2894 if (!qpair)
2895 return VMCI_ERROR_INVALID_ARGS;
2896
2897 qp_lock(qpair);
2898 result =
2899 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2900 if (result == VMCI_SUCCESS)
2901 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2902 producer_tail, consumer_head);
2903 qp_unlock(qpair);
2904
2905 if (result == VMCI_SUCCESS &&
2906 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2907 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2908 return VMCI_ERROR_INVALID_SIZE;
2909
2910 return result;
2911}
2912EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
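
/*
 * The two indexes describe the ring from the producer's side: the
 * number of bytes in flight is (producer_tail - consumer_head) modulo
 * the produce queue size. As a hypothetical example, with
 * produce_q_size = 4096, producer_tail = 100 and consumer_head = 3900,
 * the queue holds (100 - 3900 + 4096) % 4096 = 296 bytes, and since one
 * byte is reserved to disambiguate a full ring from an empty one, at
 * most 4096 - 296 - 1 = 3799 further bytes can be enqueued.
 */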
2913
2914/*
2915 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2916 * @qpair: Pointer to the queue pair struct.
2917 * @consumer_tail: Reference used for storing consumer tail index.
2918 * @producer_head: Reference used for storing the producer head index.
2919 *
2920 * This is the client interface for getting the current indexes of the
2921 * QPair from the point of view of the caller as the consumer.
2922 */
2923int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2924 u64 *consumer_tail,
2925 u64 *producer_head)
2926{
2927 struct vmci_queue_header *produce_q_header;
2928 struct vmci_queue_header *consume_q_header;
2929 int result;
2930
2931 if (!qpair)
2932 return VMCI_ERROR_INVALID_ARGS;
2933
2934 qp_lock(qpair);
2935 result =
2936 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2937 if (result == VMCI_SUCCESS)
2938 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2939 consumer_tail, producer_head);
2940 qp_unlock(qpair);
2941
2942 if (result == VMCI_SUCCESS &&
2943 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2944 (producer_head && *producer_head >= qpair->consume_q_size)))
2945 return VMCI_ERROR_INVALID_SIZE;
2946
2947 return result;
2948}
2949EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2950
2951/*
2952 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2953 * @qpair: Pointer to the queue pair struct.
2954 *
2955 * This is the client interface for getting the amount of free
2956 * space in the QPair from the point of the view of the caller as
2957 * the producer which is the common case. Returns < 0 if err, else
2958 * available bytes into which data can be enqueued if > 0.
2959 */
2960s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2961{
2962 struct vmci_queue_header *produce_q_header;
2963 struct vmci_queue_header *consume_q_header;
2964 s64 result;
2965
2966 if (!qpair)
2967 return VMCI_ERROR_INVALID_ARGS;
2968
2969 qp_lock(qpair);
2970 result =
2971 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2972 if (result == VMCI_SUCCESS)
2973 result = vmci_q_header_free_space(produce_q_header,
2974 consume_q_header,
2975 qpair->produce_q_size);
2976 else
2977 result = 0;
2978
2979 qp_unlock(qpair);
2980
2981 return result;
2982}
2983EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
2984
2985/*
2986 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
2987 * @qpair: Pointer to the queue pair struct.
2988 *
2989 * This is the client interface for getting the amount of free
2990 * space in the QPair from the point of the view of the caller as
2991 * the consumer which is not the common case. Returns < 0 if err, else
2992 * available bytes into which data can be enqueued if > 0.
2993 */
2994s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
2995{
2996 struct vmci_queue_header *produce_q_header;
2997 struct vmci_queue_header *consume_q_header;
2998 s64 result;
2999
3000 if (!qpair)
3001 return VMCI_ERROR_INVALID_ARGS;
3002
3003 qp_lock(qpair);
3004 result =
3005 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3006 if (result == VMCI_SUCCESS)
3007 result = vmci_q_header_free_space(consume_q_header,
3008 produce_q_header,
3009 qpair->consume_q_size);
3010 else
3011 result = 0;
3012
3013 qp_unlock(qpair);
3014
3015 return result;
3016}
3017EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
3018
3019/*
3020 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
3021 * producer queue.
3022 * @qpair: Pointer to the queue pair struct.
3023 *
3024 * This is the client interface for getting the amount of
3025 * enqueued data in the QPair from the point of view of the
3026 * caller as the producer, which is not the common case. Returns < 0 on
3027 * error, otherwise the number of bytes that may be read.
3028 */
3029s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
3030{
3031 struct vmci_queue_header *produce_q_header;
3032 struct vmci_queue_header *consume_q_header;
3033 s64 result;
3034
3035 if (!qpair)
3036 return VMCI_ERROR_INVALID_ARGS;
3037
3038 qp_lock(qpair);
3039 result =
3040 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3041 if (result == VMCI_SUCCESS)
3042 result = vmci_q_header_buf_ready(produce_q_header,
3043 consume_q_header,
3044 qpair->produce_q_size);
3045 else
3046 result = 0;
3047
3048 qp_unlock(qpair);
3049
3050 return result;
3051}
3052EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
3053
3054/*
3055 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
3056 * consumer queue.
3057 * @qpair: Pointer to the queue pair struct.
3058 *
3059 * This is the client interface for getting the amount of
3060 * enqueued data in the QPair from the point of view of the
3061 * caller as the consumer, which is the normal case. Returns < 0 on
3062 * error, otherwise the number of bytes that may be read.
3063 */
3064s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
3065{
3066 struct vmci_queue_header *produce_q_header;
3067 struct vmci_queue_header *consume_q_header;
3068 s64 result;
3069
3070 if (!qpair)
3071 return VMCI_ERROR_INVALID_ARGS;
3072
3073 qp_lock(qpair);
3074 result =
3075 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3076 if (result == VMCI_SUCCESS)
3077 result = vmci_q_header_buf_ready(consume_q_header,
3078 produce_q_header,
3079 qpair->consume_q_size);
3080 else
3081 result = 0;
3082
3083 qp_unlock(qpair);
3084
3085 return result;
3086}
3087EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3088
3089/*
3090 * vmci_qpair_enqueue() - Throw data on the queue.
3091 * @qpair: Pointer to the queue pair struct.
3092 * @buf: Pointer to buffer containing data
3093 * @buf_size: Length of buffer.
3094 * @buf_type: Buffer type (Unused).
3095 *
3096 * This is the client interface for enqueueing data into the queue.
3097 * Returns number of bytes enqueued or < 0 on error.
3098 */
3099ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3100 const void *buf,
3101 size_t buf_size,
3102 int buf_type)
3103{
3104 ssize_t result;
3105
3106 if (!qpair || !buf)
3107 return VMCI_ERROR_INVALID_ARGS;
3108
3109 qp_lock(qpair);
3110
3111 do {
3112 result = qp_enqueue_locked(qpair->produce_q,
3113 qpair->consume_q,
3114 qpair->produce_q_size,
3115 buf, buf_size,
3116 qp_memcpy_to_queue);
3117
3118 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3119 !qp_wait_for_ready_queue(qpair))
3120 result = VMCI_ERROR_WOULD_BLOCK;
3121
3122 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3123
3124 qp_unlock(qpair);
3125
3126 return result;
3127}
3128EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
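
/*
 * Illustrative pairing only (msg and msg_len are hypothetical): a
 * producer may consult the free-space query before enqueueing. The
 * check is advisory but safe for a single producer, since a concurrent
 * consumer can only increase the free space between the two calls.
 *
 *	if (vmci_qpair_produce_free_space(qpair) >= msg_len)
 *		written = vmci_qpair_enqueue(qpair, msg, msg_len, 0);
 */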
3129
3130/*
3131 * vmci_qpair_dequeue() - Get data from the queue.
3132 * @qpair: Pointer to the queue pair struct.
3133 * @buf: Pointer to buffer for the data
3134 * @buf_size: Length of buffer.
3135 * @buf_type: Buffer type (Unused).
3136 *
3137 * This is the client interface for dequeueing data from the queue.
3138 * Returns number of bytes dequeued or < 0 on error.
3139 */
3140ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3141 void *buf,
3142 size_t buf_size,
3143 int buf_type)
3144{
3145 ssize_t result;
3146
3147 if (!qpair || !buf)
3148 return VMCI_ERROR_INVALID_ARGS;
3149
3150 qp_lock(qpair);
3151
3152 do {
3153 result = qp_dequeue_locked(qpair->produce_q,
3154 qpair->consume_q,
3155 qpair->consume_q_size,
3156 buf, buf_size,
3157 qp_memcpy_from_queue, true);
3158
3159 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3160 !qp_wait_for_ready_queue(qpair))
3161 result = VMCI_ERROR_WOULD_BLOCK;
3162
3163 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3164
3165 qp_unlock(qpair);
3166
3167 return result;
3168}
3169EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
3170
3171/*
3172 * vmci_qpair_peek() - Peek at the data in the queue.
3173 * @qpair: Pointer to the queue pair struct.
3174 * @buf: Pointer to buffer for the data
3175 * @buf_size: Length of buffer.
3176 * @buf_type: Buffer type (Unused on Linux).
3177 *
3178 * This is the client interface for peeking into a queue. (I.e.,
3179 * copy data from the queue without updating the head pointer.)
3180 * Returns number of bytes dequeued or < 0 on error.
3181 */
3182ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3183 void *buf,
3184 size_t buf_size,
3185 int buf_type)
3186{
3187 ssize_t result;
3188
3189 if (!qpair || !buf)
3190 return VMCI_ERROR_INVALID_ARGS;
3191
3192 qp_lock(qpair);
3193
3194 do {
3195 result = qp_dequeue_locked(qpair->produce_q,
3196 qpair->consume_q,
3197 qpair->consume_q_size,
3198 buf, buf_size,
3199 qp_memcpy_from_queue, false);
3200
3201 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3202 !qp_wait_for_ready_queue(qpair))
3203 result = VMCI_ERROR_WOULD_BLOCK;
3204
3205 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3206
3207 qp_unlock(qpair);
3208
3209 return result;
3210}
3211EXPORT_SYMBOL_GPL(vmci_qpair_peek);
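
/*
 * Peek differs from dequeue only in the update_consumer flag passed to
 * qp_dequeue_locked(): the consumer head is left untouched, so the same
 * bytes remain readable afterwards. As a hypothetical example, a
 * protocol with a fixed-size header could peek the header to learn the
 * payload length and only then dequeue the complete message:
 *
 *	if (vmci_qpair_peek(qpair, &hdr, sizeof(hdr), 0) == sizeof(hdr))
 *		vmci_qpair_dequeue(qpair, msg, sizeof(hdr) + hdr.len, 0);
 */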
3212
3213/*
3214 * vmci_qpair_enquev() - Throw data on the queue using iov.
3215 * @qpair: Pointer to the queue pair struct.
3216 * @iov: Pointer to buffer containing data
3217 * @iov_size: Length of buffer.
3218 * @buf_type: Buffer type (Unused).
3219 *
3220 * This is the client interface for enqueueing data into the queue.
3221 * This function uses IO vectors to handle the work. Returns number
3222 * of bytes enqueued or < 0 on error.
3223 */
3224ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3225 void *iov,
3226 size_t iov_size,
3227 int buf_type)
3228{
3229 ssize_t result;
3230
3231 if (!qpair || !iov)
3232 return VMCI_ERROR_INVALID_ARGS;
3233
3234 qp_lock(qpair);
3235
3236 do {
3237 result = qp_enqueue_locked(qpair->produce_q,
3238 qpair->consume_q,
3239 qpair->produce_q_size,
3240 iov, iov_size,
3241 qp_memcpy_to_queue_iov);
3242
3243 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3244 !qp_wait_for_ready_queue(qpair))
3245 result = VMCI_ERROR_WOULD_BLOCK;
3246
3247 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3248
3249 qp_unlock(qpair);
3250
3251 return result;
3252}
3253EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
3254
3255/*
3256 * vmci_qpair_dequev() - Get data from the queue using iov.
3257 * @qpair: Pointer to the queue pair struct.
3258 * @iov: Pointer to buffer for the data
3259 * @iov_size: Length of buffer.
3260 * @buf_type: Buffer type (Unused).
3261 *
3262 * This is the client interface for dequeueing data from the queue.
3263 * This function uses IO vectors to handle the work. Returns number
3264 * of bytes dequeued or < 0 on error.
3265 */
3266ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3267 void *iov,
3268 size_t iov_size,
3269 int buf_type)
3270{
3271 ssize_t result;
3272
3273 if (!qpair || !iov)
3274 return VMCI_ERROR_INVALID_ARGS;
3275
3276 qp_lock(qpair);
3277
3278 do {
3279 result = qp_dequeue_locked(qpair->produce_q,
3280 qpair->consume_q,
3281 qpair->consume_q_size,
3282 iov, iov_size,
3283 qp_memcpy_from_queue_iov,
3284 true);
3285
3286 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3287 !qp_wait_for_ready_queue(qpair))
3288 result = VMCI_ERROR_WOULD_BLOCK;
3289
3290 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3291
3292 qp_unlock(qpair);
3293
3294 return result;
3295}
3296EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3297
3298/*
3299 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3300 * @qpair: Pointer to the queue pair struct.
3301 * @iov: Pointer to buffer for the data
3302 * @iov_size: Length of buffer.
3303 * @buf_type: Buffer type (Unused on Linux).
3304 *
3305 * This is the client interface for peeking into a queue. (I.e.,
3306 * copy data from the queue without updating the head pointer.)
3307 * This function uses IO vectors to handle the work. Returns number
3308 * of bytes peeked or < 0 on error.
3309 */
3310ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3311 void *iov,
3312 size_t iov_size,
3313 int buf_type)
3314{
3315 ssize_t result;
3316
3317 if (!qpair || !iov)
3318 return VMCI_ERROR_INVALID_ARGS;
3319
3320 qp_lock(qpair);
3321
3322 do {
3323 result = qp_dequeue_locked(qpair->produce_q,
3324 qpair->consume_q,
3325 qpair->consume_q_size,
3326 iov, iov_size,
3327 qp_memcpy_from_queue_iov,
3328 false);
3329
3330 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3331 !qp_wait_for_ready_queue(qpair))
3332 result = VMCI_ERROR_WOULD_BLOCK;
3333
3334 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3335
3336 qp_unlock(qpair);
3337 return result;
3338}
3339EXPORT_SYMBOL_GPL(vmci_qpair_peekv);