virtio-fs: add multi-queue support
fs/fuse/virtio_fs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/group_cpus.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD 4

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between the device removal and mounting paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

/* The /sys/fs/virtiofs/ kset */
static struct kset *virtio_fs_kset;

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN 24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kobject kobj;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	unsigned int *mq_map; /* index = cpu id, value = request vq id */

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight);

static const struct constant_table dax_param_enums[] = {
	{"always",	FUSE_DAX_ALWAYS },
	{"never",	FUSE_DAX_NEVER },
	{"inode",	FUSE_DAX_INODE_USER },
	{}
};

enum {
	OPT_DAX,
	OPT_DAX_ENUM,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
	{}
};

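/* Parse virtiofs-specific mount options. Only "dax" is recognized here,
 * either as a bare flag or as dax=always|never|inode; all other options
 * are handled by the FUSE core. Example (hypothetical tag and mount point):
 *
 *	mount -t virtiofs -o dax=always <tag> /mnt
 */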
static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax_mode = FUSE_DAX_ALWAYS;
		break;
	case OPT_DAX_ENUM:
		ctx->dax_mode = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

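/* The sysfs "tag" attribute, e.g. /sys/fs/virtiofs/<N>/tag */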
static ssize_t tag_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	return sysfs_emit(buf, "%s\n", fs->tag);
}

static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);

static struct attribute *virtio_fs_attrs[] = {
	&virtio_fs_tag_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(virtio_fs);

static void virtio_fs_ktype_release(struct kobject *kobj)
{
	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);

	kfree(vfs->mq_map);
	kfree(vfs->vqs);
	kfree(vfs);
}

static const struct kobj_type virtio_fs_ktype = {
	.release = virtio_fs_ktype_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = virtio_fs_groups,
};

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kobject_put(&fs->kobj);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(vfs);
	mutex_unlock(&virtio_fs_mutex);
}

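/* Wait for all in-flight requests on this queue to finish, then flush any
 * remaining completion and dispatch work. Runs under virtio_fs_mutex.
 */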
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queues at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

/* Add a new instance to the list or return -EEXIST if the tag name exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	int ret;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0) {
			mutex_unlock(&virtio_fs_mutex);
			return -EEXIST;
		}
	}

	/* Use the virtio_device's index as a unique identifier, there is no
	 * need to allocate our own identifiers because the virtio_fs instance
	 * is only visible to userspace as long as the underlying virtio_device
	 * exists.
	 */
	fs->kobj.kset = virtio_fs_kset;
	ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
	if (ret < 0) {
		mutex_unlock(&virtio_fs_mutex);
		return ret;
	}

	ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
	if (ret < 0) {
		kobject_del(&fs->kobj);
		mutex_unlock(&virtio_fs_mutex);
		return ret;
	}

	list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	kobject_uevent(&fs->kobj, KOBJ_ADD);

	return 0;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kobject_get(&fs->kobj);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read the filesystem tag from virtio config into fs->tag (devm-allocated,
 * freed together with the device).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';

	/* While the VIRTIO specification allows any character, newlines are
	 * awkward on mount(8) command-lines and cause problems in the sysfs
	 * "tag" attr and uevent TAG= properties. Forbid them.
	 */
	if (strchr(fs->tag, '\n')) {
		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
		return -EINVAL;
	}

	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock(&fsvq->lock);
}

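/* Work function for the request queues' dispatch_work: first end any
 * requests parked on ->end_reqs, then retry submission of requests that
 * were queued because the virtqueue was full.
 */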
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

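/* Work function for the hiprio queue's dispatch_work: retry FORGET
 * requests that could not be queued earlier because the virtqueue was full.
 */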
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
						  struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, GFP_ATOMIC);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Complete a request: copy reply args out of the bounce buffer, zero any
 * trailing page bytes, and end the request.
 */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct page *page;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_pages; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				page = ap->pages[i];
				zero_user_segment(page, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

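/* Work function for request queue completion: collect completed requests
 * off the virtqueue and end them, deferring may_block requests to a worker.
 */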
static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}
}

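/* Build the CPU-to-request-queue map: prefer the transport's interrupt
 * affinities (e.g. PCI MSI-X), fall back to spreading CPUs evenly across
 * queues, and finally to mapping every CPU to queue 0.
 */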
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{
	const struct cpumask *mask, *masks;
	unsigned int q, cpu;

	/* First attempt to map using existing transport layer affinities
	 * e.g. PCIe MSI-X
	 */
	if (!vdev->config->get_vq_affinity)
		goto fallback;

	for (q = 0; q < fs->num_request_queues; q++) {
		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			fs->mq_map[cpu] = q;
	}

	return;
fallback:
	/* Attempt to map evenly in groups over the CPUs */
	masks = group_cpus_evenly(fs->num_request_queues);
	/* If even this fails, default to mapping all CPUs to queue zero */
	if (!masks) {
		for_each_possible_cpu(cpu)
			fs->mq_map[cpu] = 0;
		return;
	}

	for (q = 0; q < fs->num_request_queues; q++) {
		for_each_cpu(cpu, &masks[q])
			fs->mq_map[cpu] = q;
	}
	kfree(masks);
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strscpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_DELAYED_WORK(&fsvq->dispatch_work,
				  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue **vqs;
	vq_callback_t **callbacks;
	/* Specify pre_vectors to ensure that the queues before the
	 * request queues (e.g. hiprio) don't claim any of the CPUs in
	 * the multi-queue mapping and interrupt affinities
	 */
	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
	const char **names;
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	/* Truncate nr of request queues to nr_cpu_ids */
	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
				       nr_cpu_ids);
	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
				  GFP_KERNEL);
	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
				  dev_to_node(&vdev->dev));
	if (!vqs || !callbacks || !names || !fs->mq_map) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
		names[i] = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (ret) {
		kfree(fs->vqs);
		kfree(fs->mq_map);
	}
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, enum dax_access_mode mode,
				    void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
				     PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
			       NULL);
	if (rc < 0)
		return dax_mem2blk_err(rc);

	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

DEFINE_FREE(cleanup_dax, struct dax_device *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))

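/* Set up the DAX window: allocate a dax_device and memremap the device's
 * shared memory cache region, if the device advertises one.
 */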
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct dax_device *dax_dev __free(cleanup_dax) = NULL;
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
	if (IS_ERR(dax_dev)) {
		int rc = PTR_ERR(dax_dev);
		return rc == -EOPNOTSUPP ? 0 : rc;
	}

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct range from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = no_free_ptr(dax_dev);
	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

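/* Device probe: read the tag, set up virtqueues and the CPU-to-queue map,
 * optionally set up DAX, then make the instance visible in sysfs.
 */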
static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kobject_init(&fs->kobj, &virtio_fs_ktype);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	virtio_fs_map_queues(vdev, fs);

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

out:
	vdev->priv = NULL;
	kobject_put(&fs->kobj);
	return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	sysfs_remove_link(&fs->kobj, "device");
	kobject_del(&fs->kobj);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

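/* Dequeue a FORGET from the fuse input queue and send it on the hiprio
 * virtqueue. Called with fiq->lock held; releases it.
 */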
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	struct fuse_forget_link *link;
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	u64 unique;

	link = fuse_dequeue_forget(fiq, 1, NULL);
	unique = fuse_get_unique(fiq);

	fs = fiq->priv;
	fsvq = &fs->vqs[VQ_HIPRIO];
	spin_unlock(&fiq->lock);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on a local filesystem aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with shared lock between host and guest.
	 */
	spin_unlock(&fiq->lock);
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
					unsigned int num_pages,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		this_len = min(page_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
						 size);
	}

	return total_sgs;
}

/* Add pages to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
				       struct page **pages,
				       struct fuse_page_desc *page_descs,
				       unsigned int num_pages,
				       unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_pages && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(page_descs[i].length, total_len);
		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
						ap->pages, ap->descs,
						ap->num_pages,
						args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

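/* Send a pending request: pick the request queue mapped to the current CPU
 * via fs->mq_map and enqueue the request there. Called with fiq->lock held;
 * releases it.
 */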
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	unsigned int queue_id;
	struct virtio_fs *fs;
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	int ret;

	WARN_ON(list_empty(&fiq->pending));
	req = list_last_entry(&fiq->pending, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	WARN_ON(!list_empty(&fiq->pending));
	spin_unlock(&fiq->lock);

	fs = fiq->priv;
	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args),
		 queue_id);

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_delayed_work(&fsvq->dispatch_work, 0);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
	.release			= virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

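/* Fill in the superblock: allocate one fuse_dev per virtqueue, wire up DAX
 * if requested, then hand off to fuse_fill_super_common() and send INIT.
 */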
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After taking the mutex, make sure the virtiofs device is still
	 * there. Though we are holding a reference to it, the driver's
	 * ->remove might still have cleaned up the virtqueues. In that
	 * case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate fuse_dev for all virtqueues (hiprio and request queues) */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax_mode != FUSE_DAX_NEVER) {
		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem device does not support it\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* Previous unmount will stop all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
	fuse_mount_destroy(fm);
}

static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

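/* Find the virtio_fs instance whose tag matches fsc->source and create or
 * reuse a superblock for it. The mount source is the device tag, e.g.
 * "mount -t virtiofs <tag> /mnt".
 */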
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	/* This gets a reference on virtio_fs object. This ptr gets installed
	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
	 * to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;
	fc->sync_fs = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put(fs);
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fsc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
		return fuse_init_fs_context_submount(fsc);

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};

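/* Include the device tag in uevents so userspace can identify the
 * filesystem instance without reading the sysfs "tag" attribute.
 */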
static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	add_uevent_var(env, "TAG=%s", fs->tag);
	return 0;
}

static const struct kset_uevent_ops virtio_fs_uevent_ops = {
	.uevent = virtio_fs_uevent,
};

static int __init virtio_fs_sysfs_init(void)
{
	virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops,
					     fs_kobj);
	if (!virtio_fs_kset)
		return -ENOMEM;
	return 0;
}

static void virtio_fs_sysfs_exit(void)
{
	kset_unregister(virtio_fs_kset);
	virtio_fs_kset = NULL;
}

static int __init virtio_fs_init(void)
{
	int ret;

	ret = virtio_fs_sysfs_init();
	if (ret < 0)
		return ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		goto sysfs_exit;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0)
		goto unregister_virtio_driver;

	return 0;

unregister_virtio_driver:
	unregister_virtio_driver(&virtio_fs_driver);
sysfs_exit:
	virtio_fs_sysfs_exit();
	return ret;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
	virtio_fs_sysfs_exit();
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);