fs/fuse/virtio_fs.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * virtio-fs: Virtio Filesystem
4 * Copyright (C) 2018 Red Hat, Inc.
5 */
6
7#include <linux/fs.h>
8#include <linux/dax.h>
9#include <linux/pci.h>
10#include <linux/pfn_t.h>
11#include <linux/memremap.h>
12#include <linux/module.h>
13#include <linux/virtio.h>
14#include <linux/virtio_fs.h>
15#include <linux/delay.h>
16#include <linux/fs_context.h>
17#include <linux/fs_parser.h>
18#include <linux/highmem.h>
19#include <linux/cleanup.h>
20#include <linux/uio.h>
21#include "fuse_i.h"
22
23/* Used to help calculate the FUSE connection's max_pages limit for a request's
24 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
25 * addition to the pages used, so this can help account for that overhead.
26 */
27#define FUSE_HEADER_OVERHEAD 4
28
29/* List of virtio-fs device instances and a lock for the list. Also provides
30 * mutual exclusion between the device removal and mounting paths
31 */
32static DEFINE_MUTEX(virtio_fs_mutex);
33static LIST_HEAD(virtio_fs_instances);
34
35enum {
36 VQ_HIPRIO,
37 VQ_REQUEST
38};
39
40#define VQ_NAME_LEN 24
41
42/* Per-virtqueue state */
43struct virtio_fs_vq {
44 spinlock_t lock;
45 struct virtqueue *vq; /* protected by ->lock */
46 struct work_struct done_work;
47 struct list_head queued_reqs;
48 struct list_head end_reqs; /* End these requests */
49 struct delayed_work dispatch_work;
50 struct fuse_dev *fud;
51 bool connected;
52 long in_flight;
53 struct completion in_flight_zero; /* No inflight requests */
54 char name[VQ_NAME_LEN];
55} ____cacheline_aligned_in_smp;
56
57/* A virtio-fs device instance */
58struct virtio_fs {
59 struct kref refcount;
60 struct list_head list; /* on virtio_fs_instances */
61 char *tag;
62 struct virtio_fs_vq *vqs;
63 unsigned int nvqs; /* number of virtqueues */
64 unsigned int num_request_queues; /* number of request queues */
65 struct dax_device *dax_dev;
66
67 /* DAX memory window where file contents are mapped */
68 void *window_kaddr;
69 phys_addr_t window_phys_addr;
70 size_t window_len;
71};
72
73struct virtio_fs_forget_req {
74 struct fuse_in_header ih;
75 struct fuse_forget_in arg;
76};
77
78struct virtio_fs_forget {
79 /* This request can be temporarily queued on virt queue */
80 struct list_head list;
81 struct virtio_fs_forget_req req;
82};
83
84struct virtio_fs_req_work {
85 struct fuse_req *req;
86 struct virtio_fs_vq *fsvq;
87 struct work_struct done_work;
88};
89
90static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
91 struct fuse_req *req, bool in_flight);
92
93static const struct constant_table dax_param_enums[] = {
94 {"always", FUSE_DAX_ALWAYS },
95 {"never", FUSE_DAX_NEVER },
96 {"inode", FUSE_DAX_INODE_USER },
97 {}
98};
99
100enum {
101 OPT_DAX,
102 OPT_DAX_ENUM,
103};
104
105static const struct fs_parameter_spec virtio_fs_parameters[] = {
106 fsparam_flag("dax", OPT_DAX),
107 fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
108 {}
109};
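
/*
 * Example usage (illustrative): with the parameter table above, a virtiofs
 * filesystem accepts either a bare "dax" flag or an explicit mode, e.g.
 *
 *   mount -t virtiofs -o dax=always <tag> /mnt
 *
 * where <tag> is the device tag read by virtio_fs_read_tag() below.
 */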
110
111static int virtio_fs_parse_param(struct fs_context *fsc,
112 struct fs_parameter *param)
113{
114 struct fs_parse_result result;
115 struct fuse_fs_context *ctx = fsc->fs_private;
116 int opt;
117
118 opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
119 if (opt < 0)
120 return opt;
121
122 switch (opt) {
123 case OPT_DAX:
124 ctx->dax_mode = FUSE_DAX_ALWAYS;
125 break;
126 case OPT_DAX_ENUM:
127 ctx->dax_mode = result.uint_32;
128 break;
129 default:
130 return -EINVAL;
131 }
132
133 return 0;
134}
135
136static void virtio_fs_free_fsc(struct fs_context *fsc)
137{
138 struct fuse_fs_context *ctx = fsc->fs_private;
139
140 kfree(ctx);
141}
142
143static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
144{
145 struct virtio_fs *fs = vq->vdev->priv;
146
147 return &fs->vqs[vq->index];
148}
149
150/* Should be called with fsvq->lock held. */
151static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
152{
153 fsvq->in_flight++;
154}
155
156/* Should be called with fsvq->lock held. */
157static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
158{
159 WARN_ON(fsvq->in_flight <= 0);
160 fsvq->in_flight--;
161 if (!fsvq->in_flight)
162 complete(&fsvq->in_flight_zero);
163}
164
165static void release_virtio_fs_obj(struct kref *ref)
166{
167 struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
168
169 kfree(vfs->vqs);
170 kfree(vfs);
171}
172
173/* Make sure virtio_fs_mutex is held */
174static void virtio_fs_put(struct virtio_fs *fs)
175{
176 kref_put(&fs->refcount, release_virtio_fs_obj);
177}
178
179static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
180{
181 struct virtio_fs *vfs = fiq->priv;
182
183 mutex_lock(&virtio_fs_mutex);
184 virtio_fs_put(vfs);
185 mutex_unlock(&virtio_fs_mutex);
186}
187
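/*
 * Draining a queue waits for fsvq->in_flight to drop to zero (signalled via
 * the in_flight_zero completion in dec_in_flight_req()) and then flushes the
 * completion and dispatch workers so nothing references the virtqueue anymore.
 */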
188static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
189{
190 WARN_ON(fsvq->in_flight < 0);
191
192 /* Wait for in-flight requests to finish. */
193 spin_lock(&fsvq->lock);
194 if (fsvq->in_flight) {
195 /* We are holding virtio_fs_mutex. There should not be any
196 * waiters waiting for completion.
197 */
198 reinit_completion(&fsvq->in_flight_zero);
199 spin_unlock(&fsvq->lock);
200 wait_for_completion(&fsvq->in_flight_zero);
201 } else {
202 spin_unlock(&fsvq->lock);
203 }
204
205 flush_work(&fsvq->done_work);
206 flush_delayed_work(&fsvq->dispatch_work);
207}
208
209static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
210{
211 struct virtio_fs_vq *fsvq;
212 int i;
213
214 for (i = 0; i < fs->nvqs; i++) {
215 fsvq = &fs->vqs[i];
216 virtio_fs_drain_queue(fsvq);
217 }
218}
219
220static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
221{
222 /* Provides mutual exclusion between ->remove and ->kill_sb
223 * paths. We don't want both of these draining the queues at the
224 * same time. Current completion logic reinits completion
225 * and that means there should not be any other thread
226 * doing reinit or waiting for completion already.
227 */
228 mutex_lock(&virtio_fs_mutex);
229 virtio_fs_drain_all_queues_locked(fs);
230 mutex_unlock(&virtio_fs_mutex);
231}
232
233static void virtio_fs_start_all_queues(struct virtio_fs *fs)
234{
235 struct virtio_fs_vq *fsvq;
236 int i;
237
238 for (i = 0; i < fs->nvqs; i++) {
239 fsvq = &fs->vqs[i];
240 spin_lock(&fsvq->lock);
241 fsvq->connected = true;
242 spin_unlock(&fsvq->lock);
243 }
244}
245
246/* Add a new instance to the list or return -EEXIST if tag name exists */
247static int virtio_fs_add_instance(struct virtio_fs *fs)
248{
249 struct virtio_fs *fs2;
250 bool duplicate = false;
251
252 mutex_lock(&virtio_fs_mutex);
253
254 list_for_each_entry(fs2, &virtio_fs_instances, list) {
255 if (strcmp(fs->tag, fs2->tag) == 0)
256 duplicate = true;
257 }
258
259 if (!duplicate)
260 list_add_tail(&fs->list, &virtio_fs_instances);
261
262 mutex_unlock(&virtio_fs_mutex);
263
264 if (duplicate)
265 return -EEXIST;
266 return 0;
267}
268
269/* Return the virtio_fs with a given tag, or NULL */
270static struct virtio_fs *virtio_fs_find_instance(const char *tag)
271{
272 struct virtio_fs *fs;
273
274 mutex_lock(&virtio_fs_mutex);
275
276 list_for_each_entry(fs, &virtio_fs_instances, list) {
277 if (strcmp(fs->tag, tag) == 0) {
278 kref_get(&fs->refcount);
279 goto found;
280 }
281 }
282
283 fs = NULL; /* not found */
284
285found:
286 mutex_unlock(&virtio_fs_mutex);
287
288 return fs;
289}
290
291static void virtio_fs_free_devs(struct virtio_fs *fs)
292{
293 unsigned int i;
294
295 for (i = 0; i < fs->nvqs; i++) {
296 struct virtio_fs_vq *fsvq = &fs->vqs[i];
297
298 if (!fsvq->fud)
299 continue;
300
301 fuse_dev_free(fsvq->fud);
302 fsvq->fud = NULL;
303 }
304}
305
306/* Read filesystem name from virtio config into fs->tag (devm-allocated). */
307static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
308{
309 char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
310 char *end;
311 size_t len;
312
313 virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
314 &tag_buf, sizeof(tag_buf));
315 end = memchr(tag_buf, '\0', sizeof(tag_buf));
316 if (end == tag_buf)
317 return -EINVAL; /* empty tag */
318 if (!end)
319 end = &tag_buf[sizeof(tag_buf)];
320
321 len = end - tag_buf;
322 fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
323 if (!fs->tag)
324 return -ENOMEM;
325 memcpy(fs->tag, tag_buf, len);
326 fs->tag[len] = '\0';
327 return 0;
328}
329
330/* Work function for hiprio completion */
331static void virtio_fs_hiprio_done_work(struct work_struct *work)
332{
333 struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
334 done_work);
335 struct virtqueue *vq = fsvq->vq;
336
337 /* Free completed FUSE_FORGET requests */
338 spin_lock(&fsvq->lock);
339 do {
340 unsigned int len;
341 void *req;
342
343 virtqueue_disable_cb(vq);
344
345 while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
346 kfree(req);
347 dec_in_flight_req(fsvq);
348 }
349 } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
350 spin_unlock(&fsvq->lock);
351}
352
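/*
 * Request-queue dispatch worker: first end the requests parked on ->end_reqs
 * (submissions that failed outright), then retry the requests on
 * ->queued_reqs that previously did not fit into the virtqueue.
 */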
353static void virtio_fs_request_dispatch_work(struct work_struct *work)
354{
355 struct fuse_req *req;
356 struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
357 dispatch_work.work);
358 int ret;
359
360 pr_debug("virtio-fs: worker %s called.\n", __func__);
361 while (1) {
362 spin_lock(&fsvq->lock);
363 req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
364 list);
365 if (!req) {
366 spin_unlock(&fsvq->lock);
367 break;
368 }
369
370 list_del_init(&req->list);
371 spin_unlock(&fsvq->lock);
372 fuse_request_end(req);
373 }
374
375 /* Dispatch pending requests */
376 while (1) {
377 spin_lock(&fsvq->lock);
378 req = list_first_entry_or_null(&fsvq->queued_reqs,
379 struct fuse_req, list);
380 if (!req) {
381 spin_unlock(&fsvq->lock);
382 return;
383 }
384 list_del_init(&req->list);
385 spin_unlock(&fsvq->lock);
386
387 ret = virtio_fs_enqueue_req(fsvq, req, true);
388 if (ret < 0) {
389 if (ret == -ENOMEM || ret == -ENOSPC) {
390 spin_lock(&fsvq->lock);
391 list_add_tail(&req->list, &fsvq->queued_reqs);
392 schedule_delayed_work(&fsvq->dispatch_work,
393 msecs_to_jiffies(1));
394 spin_unlock(&fsvq->lock);
395 return;
396 }
397 req->out.h.error = ret;
398 spin_lock(&fsvq->lock);
399 dec_in_flight_req(fsvq);
400 spin_unlock(&fsvq->lock);
401 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
402 ret);
403 fuse_request_end(req);
404 }
405 }
406}
407
408/*
409 * Returns 1 if queue is full and sender should wait a bit before sending
410 * next request, 0 otherwise.
411 */
412static int send_forget_request(struct virtio_fs_vq *fsvq,
413 struct virtio_fs_forget *forget,
414 bool in_flight)
415{
416 struct scatterlist sg;
417 struct virtqueue *vq;
418 int ret = 0;
419 bool notify;
420 struct virtio_fs_forget_req *req = &forget->req;
421
422 spin_lock(&fsvq->lock);
423 if (!fsvq->connected) {
424 if (in_flight)
425 dec_in_flight_req(fsvq);
426 kfree(forget);
427 goto out;
428 }
429
430 sg_init_one(&sg, req, sizeof(*req));
431 vq = fsvq->vq;
432 dev_dbg(&vq->vdev->dev, "%s\n", __func__);
433
434 ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
435 if (ret < 0) {
436 if (ret == -ENOMEM || ret == -ENOSPC) {
437 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
438 ret);
439 list_add_tail(&forget->list, &fsvq->queued_reqs);
440 schedule_delayed_work(&fsvq->dispatch_work,
441 msecs_to_jiffies(1));
442 if (!in_flight)
443 inc_in_flight_req(fsvq);
444 /* Queue is full */
445 ret = 1;
446 } else {
447 pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
448 ret);
449 kfree(forget);
450 if (in_flight)
451 dec_in_flight_req(fsvq);
452 }
453 goto out;
454 }
455
456 if (!in_flight)
457 inc_in_flight_req(fsvq);
458 notify = virtqueue_kick_prepare(vq);
459 spin_unlock(&fsvq->lock);
460
461 if (notify)
462 virtqueue_notify(vq);
463 return ret;
464out:
465 spin_unlock(&fsvq->lock);
466 return ret;
467}
468
469static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
470{
471 struct virtio_fs_forget *forget;
472 struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
473 dispatch_work.work);
474 pr_debug("virtio-fs: worker %s called.\n", __func__);
475 while (1) {
476 spin_lock(&fsvq->lock);
477 forget = list_first_entry_or_null(&fsvq->queued_reqs,
478 struct virtio_fs_forget, list);
479 if (!forget) {
480 spin_unlock(&fsvq->lock);
481 return;
482 }
483
484 list_del(&forget->list);
485 spin_unlock(&fsvq->lock);
486 if (send_forget_request(fsvq, forget, true))
487 return;
488 }
489}
490
491/* Allocate and copy args into req->argbuf */
492static int copy_args_to_argbuf(struct fuse_req *req)
493{
494 struct fuse_args *args = req->args;
495 unsigned int offset = 0;
496 unsigned int num_in;
497 unsigned int num_out;
498 unsigned int len;
499 unsigned int i;
500
501 num_in = args->in_numargs - args->in_pages;
502 num_out = args->out_numargs - args->out_pages;
503 len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
504 fuse_len_args(num_out, args->out_args);
505
506 req->argbuf = kmalloc(len, GFP_ATOMIC);
507 if (!req->argbuf)
508 return -ENOMEM;
509
510 for (i = 0; i < num_in; i++) {
511 memcpy(req->argbuf + offset,
512 args->in_args[i].value,
513 args->in_args[i].size);
514 offset += args->in_args[i].size;
515 }
516
517 return 0;
518}
519
520/* Copy args out of and free req->argbuf */
521static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
522{
523 unsigned int remaining;
524 unsigned int offset;
525 unsigned int num_in;
526 unsigned int num_out;
527 unsigned int i;
528
529 remaining = req->out.h.len - sizeof(req->out.h);
530 num_in = args->in_numargs - args->in_pages;
531 num_out = args->out_numargs - args->out_pages;
532 offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);
533
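	/*
	 * 'remaining' counts the reply bytes the server actually produced;
	 * a variable-length final argument (out_argvar) may therefore end up
	 * shorter than the size the caller reserved for it.
	 */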
534 for (i = 0; i < num_out; i++) {
535 unsigned int argsize = args->out_args[i].size;
536
537 if (args->out_argvar &&
538 i == args->out_numargs - 1 &&
539 argsize > remaining) {
540 argsize = remaining;
541 }
542
543 memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
544 offset += argsize;
545
546 if (i != args->out_numargs - 1)
547 remaining -= argsize;
548 }
549
550 /* Store the actual size of the variable-length arg */
551 if (args->out_argvar)
552 args->out_args[args->out_numargs - 1].size = remaining;
553
554 kfree(req->argbuf);
555 req->argbuf = NULL;
556}
557
558/* Work function for request completion */
559static void virtio_fs_request_complete(struct fuse_req *req,
560 struct virtio_fs_vq *fsvq)
561{
562 struct fuse_pqueue *fpq = &fsvq->fud->pq;
563 struct fuse_args *args;
564 struct fuse_args_pages *ap;
565 unsigned int len, i, thislen;
566 struct page *page;
567
568 /*
569 * TODO verify that server properly follows FUSE protocol
570 * (oh.uniq, oh.len)
571 */
572 args = req->args;
573 copy_args_from_argbuf(args, req);
574
575 if (args->out_pages && args->page_zeroing) {
576 len = args->out_args[args->out_numargs - 1].size;
577 ap = container_of(args, typeof(*ap), args);
578 for (i = 0; i < ap->num_pages; i++) {
579 thislen = ap->descs[i].length;
580 if (len < thislen) {
581 WARN_ON(ap->descs[i].offset);
582 page = ap->pages[i];
583 zero_user_segment(page, len, thislen);
584 len = 0;
585 } else {
586 len -= thislen;
587 }
588 }
589 }
590
591 spin_lock(&fpq->lock);
592 clear_bit(FR_SENT, &req->flags);
593 spin_unlock(&fpq->lock);
594
595 fuse_request_end(req);
596 spin_lock(&fsvq->lock);
597 dec_in_flight_req(fsvq);
598 spin_unlock(&fsvq->lock);
599}
600
601static void virtio_fs_complete_req_work(struct work_struct *work)
602{
603 struct virtio_fs_req_work *w =
604 container_of(work, typeof(*w), done_work);
605
606 virtio_fs_request_complete(w->req, w->fsvq);
607 kfree(w);
608}
609
610static void virtio_fs_requests_done_work(struct work_struct *work)
611{
612 struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
613 done_work);
614 struct fuse_pqueue *fpq = &fsvq->fud->pq;
615 struct virtqueue *vq = fsvq->vq;
616 struct fuse_req *req;
617 struct fuse_req *next;
618 unsigned int len;
619 LIST_HEAD(reqs);
620
621 /* Collect completed requests off the virtqueue */
622 spin_lock(&fsvq->lock);
623 do {
624 virtqueue_disable_cb(vq);
625
626 while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
627 spin_lock(&fpq->lock);
628 list_move_tail(&req->list, &reqs);
629 spin_unlock(&fpq->lock);
630 }
631 } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
632 spin_unlock(&fsvq->lock);
633
634 /* End requests */
635 list_for_each_entry_safe(req, next, &reqs, list) {
636 list_del_init(&req->list);
637
638 /* blocking async request completes in a worker context */
639 if (req->args->may_block) {
640 struct virtio_fs_req_work *w;
641
642 w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
643 INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
644 w->fsvq = fsvq;
645 w->req = req;
646 schedule_work(&w->done_work);
647 } else {
648 virtio_fs_request_complete(req, fsvq);
649 }
650 }
651}
652
653/* Virtqueue interrupt handler */
654static void virtio_fs_vq_done(struct virtqueue *vq)
655{
656 struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);
657
658 dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);
659
660 schedule_work(&fsvq->done_work);
661}
662
663static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
664 int vq_type)
665{
666 strscpy(fsvq->name, name, VQ_NAME_LEN);
667 spin_lock_init(&fsvq->lock);
668 INIT_LIST_HEAD(&fsvq->queued_reqs);
669 INIT_LIST_HEAD(&fsvq->end_reqs);
670 init_completion(&fsvq->in_flight_zero);
671
672 if (vq_type == VQ_REQUEST) {
673 INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
674 INIT_DELAYED_WORK(&fsvq->dispatch_work,
675 virtio_fs_request_dispatch_work);
676 } else {
677 INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
678 INIT_DELAYED_WORK(&fsvq->dispatch_work,
679 virtio_fs_hiprio_dispatch_work);
680 }
681}
682
683/* Initialize virtqueues */
684static int virtio_fs_setup_vqs(struct virtio_device *vdev,
685 struct virtio_fs *fs)
686{
687 struct virtqueue **vqs;
688 vq_callback_t **callbacks;
689 const char **names;
690 unsigned int i;
691 int ret = 0;
692
693 virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
694 &fs->num_request_queues);
695 if (fs->num_request_queues == 0)
696 return -EINVAL;
697
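	/*
	 * Queue 0 (VQ_HIPRIO) carries FORGET requests; queues VQ_REQUEST..
	 * nvqs-1 carry regular FUSE requests. Only the first request queue is
	 * used for now (see the multiqueue TODO in
	 * virtio_fs_wake_pending_and_unlock()).
	 */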
698 fs->nvqs = VQ_REQUEST + fs->num_request_queues;
699 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
700 if (!fs->vqs)
701 return -ENOMEM;
702
703 vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
704 callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
705 GFP_KERNEL);
706 names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
707 if (!vqs || !callbacks || !names) {
708 ret = -ENOMEM;
709 goto out;
710 }
711
712 /* Initialize the hiprio/forget request virtqueue */
713 callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
714 virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
715 names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
716
717 /* Initialize the requests virtqueues */
718 for (i = VQ_REQUEST; i < fs->nvqs; i++) {
719 char vq_name[VQ_NAME_LEN];
720
721 snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
722 virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
723 callbacks[i] = virtio_fs_vq_done;
724 names[i] = fs->vqs[i].name;
725 }
726
727 ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
728 if (ret < 0)
729 goto out;
730
731 for (i = 0; i < fs->nvqs; i++)
732 fs->vqs[i].vq = vqs[i];
733
734 virtio_fs_start_all_queues(fs);
735out:
736 kfree(names);
737 kfree(callbacks);
738 kfree(vqs);
739 if (ret)
740 kfree(fs->vqs);
741 return ret;
742}
743
744/* Free virtqueues (device must already be reset) */
745static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
746{
747 vdev->config->del_vqs(vdev);
748}
749
750/* Map a window offset to a page frame number. The window offset will have
751 * been produced by .iomap_begin(), which maps a file offset to a window
752 * offset.
753 */
754static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
755 long nr_pages, enum dax_access_mode mode,
756 void **kaddr, pfn_t *pfn)
757{
758 struct virtio_fs *fs = dax_get_private(dax_dev);
759 phys_addr_t offset = PFN_PHYS(pgoff);
760 size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
761
762 if (kaddr)
763 *kaddr = fs->window_kaddr + offset;
764 if (pfn)
765 *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
766 PFN_DEV | PFN_MAP);
767 return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
768}
769
770static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
771 pgoff_t pgoff, size_t nr_pages)
772{
773 long rc;
774 void *kaddr;
775
776 rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
777 NULL);
778 if (rc < 0)
779 return dax_mem2blk_err(rc);
780
781 memset(kaddr, 0, nr_pages << PAGE_SHIFT);
782 dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
783 return 0;
784}
785
786static const struct dax_operations virtio_fs_dax_ops = {
787 .direct_access = virtio_fs_direct_access,
788 .zero_page_range = virtio_fs_zero_page_range,
789};
790
791static void virtio_fs_cleanup_dax(void *data)
792{
793 struct dax_device *dax_dev = data;
794
795 kill_dax(dax_dev);
796 put_dax(dax_dev);
797}
798
799DEFINE_FREE(cleanup_dax, struct dax_device *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))
800
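/*
 * With the __free(cleanup_dax) annotation below, the scope-based cleanup
 * helper runs kill_dax()/put_dax() automatically on every early-return path
 * of virtio_fs_setup_dax(); no_free_ptr() at the end transfers ownership to
 * fs->dax_dev and disarms the automatic cleanup.
 */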
801static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
802{
803 struct dax_device *dax_dev __free(cleanup_dax) = NULL;
804 struct virtio_shm_region cache_reg;
805 struct dev_pagemap *pgmap;
806 bool have_cache;
807
808 if (!IS_ENABLED(CONFIG_FUSE_DAX))
809 return 0;
810
811 dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
812 if (IS_ERR(dax_dev)) {
813 int rc = PTR_ERR(dax_dev);
814 return rc == -EOPNOTSUPP ? 0 : rc;
815 }
816
817 /* Get cache region */
818 have_cache = virtio_get_shm_region(vdev, &cache_reg,
819 (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
820 if (!have_cache) {
821 dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
822 return 0;
823 }
824
825 if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
826 dev_name(&vdev->dev))) {
827 dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
828 cache_reg.addr, cache_reg.len);
829 return -EBUSY;
830 }
831
832 dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
833 cache_reg.addr);
834
835 pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
836 if (!pgmap)
837 return -ENOMEM;
838
839 pgmap->type = MEMORY_DEVICE_FS_DAX;
840
841 /* Ideally we would directly use the PCI BAR resource but
842 * devm_memremap_pages() wants its own copy in pgmap. So
843 * initialize a struct resource from scratch (only the start
844 * and end fields will be used).
845 */
846 pgmap->range = (struct range) {
847 .start = (phys_addr_t) cache_reg.addr,
848 .end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
849 };
850 pgmap->nr_range = 1;
851
852 fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
853 if (IS_ERR(fs->window_kaddr))
854 return PTR_ERR(fs->window_kaddr);
855
856 fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
857 fs->window_len = (phys_addr_t) cache_reg.len;
858
859 dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
860 __func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);
861
862 fs->dax_dev = no_free_ptr(dax_dev);
863 return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
864 fs->dax_dev);
865}
866
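/*
 * Device probe: read the tag, set up the virtqueues and the optional DAX
 * window, mark the device ready so requests can flow, and finally publish
 * the instance on virtio_fs_instances so it can be found at mount time.
 */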
867static int virtio_fs_probe(struct virtio_device *vdev)
868{
869 struct virtio_fs *fs;
870 int ret;
871
872 fs = kzalloc(sizeof(*fs), GFP_KERNEL);
873 if (!fs)
874 return -ENOMEM;
875 kref_init(&fs->refcount);
876 vdev->priv = fs;
877
878 ret = virtio_fs_read_tag(vdev, fs);
879 if (ret < 0)
880 goto out;
881
882 ret = virtio_fs_setup_vqs(vdev, fs);
883 if (ret < 0)
884 goto out;
885
886 /* TODO vq affinity */
887
888 ret = virtio_fs_setup_dax(vdev, fs);
889 if (ret < 0)
890 goto out_vqs;
891
892 /* Bring the device online in case the filesystem is mounted and
893 * requests need to be sent before we return.
894 */
895 virtio_device_ready(vdev);
896
897 ret = virtio_fs_add_instance(fs);
898 if (ret < 0)
899 goto out_vqs;
900
901 return 0;
902
903out_vqs:
904 virtio_reset_device(vdev);
905 virtio_fs_cleanup_vqs(vdev);
906 kfree(fs->vqs);
907
908out:
909 vdev->priv = NULL;
910 kfree(fs);
911 return ret;
912}
913
914static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
915{
916 struct virtio_fs_vq *fsvq;
917 int i;
918
919 for (i = 0; i < fs->nvqs; i++) {
920 fsvq = &fs->vqs[i];
921 spin_lock(&fsvq->lock);
922 fsvq->connected = false;
923 spin_unlock(&fsvq->lock);
924 }
925}
926
927static void virtio_fs_remove(struct virtio_device *vdev)
928{
929 struct virtio_fs *fs = vdev->priv;
930
931 mutex_lock(&virtio_fs_mutex);
932 /* This device is going away. No one should take a new reference */
933 list_del_init(&fs->list);
934 virtio_fs_stop_all_queues(fs);
935 virtio_fs_drain_all_queues_locked(fs);
936 virtio_reset_device(vdev);
937 virtio_fs_cleanup_vqs(vdev);
938
939 vdev->priv = NULL;
940 /* Put device reference on virtio_fs object */
941 virtio_fs_put(fs);
942 mutex_unlock(&virtio_fs_mutex);
943}
944
945#ifdef CONFIG_PM_SLEEP
946static int virtio_fs_freeze(struct virtio_device *vdev)
947{
948 /* TODO need to save state here */
949 pr_warn("virtio-fs: suspend/resume not yet supported\n");
950 return -EOPNOTSUPP;
951}
952
953static int virtio_fs_restore(struct virtio_device *vdev)
954{
955 /* TODO need to restore state here */
956 return 0;
957}
958#endif /* CONFIG_PM_SLEEP */
959
960static const struct virtio_device_id id_table[] = {
961 { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
962 {},
963};
964
965static const unsigned int feature_table[] = {};
966
967static struct virtio_driver virtio_fs_driver = {
968 .driver.name = KBUILD_MODNAME,
969 .driver.owner = THIS_MODULE,
970 .id_table = id_table,
971 .feature_table = feature_table,
972 .feature_table_size = ARRAY_SIZE(feature_table),
973 .probe = virtio_fs_probe,
974 .remove = virtio_fs_remove,
975#ifdef CONFIG_PM_SLEEP
976 .freeze = virtio_fs_freeze,
977 .restore = virtio_fs_restore,
978#endif
979};
980
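/*
 * FORGET requests are fire-and-forget: the buffer is allocated with
 * __GFP_NOFAIL, sent on the hiprio queue, and simply requeued by the
 * dispatch worker if the virtqueue happens to be full.
 */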
981static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
982__releases(fiq->lock)
983{
984 struct fuse_forget_link *link;
985 struct virtio_fs_forget *forget;
986 struct virtio_fs_forget_req *req;
987 struct virtio_fs *fs;
988 struct virtio_fs_vq *fsvq;
989 u64 unique;
990
991 link = fuse_dequeue_forget(fiq, 1, NULL);
992 unique = fuse_get_unique(fiq);
993
994 fs = fiq->priv;
995 fsvq = &fs->vqs[VQ_HIPRIO];
996 spin_unlock(&fiq->lock);
997
998 /* Allocate a buffer for the request */
999 forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
1000 req = &forget->req;
1001
1002 req->ih = (struct fuse_in_header){
1003 .opcode = FUSE_FORGET,
1004 .nodeid = link->forget_one.nodeid,
1005 .unique = unique,
1006 .len = sizeof(*req),
1007 };
1008 req->arg = (struct fuse_forget_in){
1009 .nlookup = link->forget_one.nlookup,
1010 };
1011
1012 send_forget_request(fsvq, forget, false);
1013 kfree(link);
1014}
1015
1016static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
1017__releases(fiq->lock)
1018{
1019 /*
1020 * TODO interrupts.
1021 *
1022 * Normal fs operations on a local filesystems aren't interruptible.
1023 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
1024 * with shared lock between host and guest.
1025 */
1026 spin_unlock(&fiq->lock);
1027}
1028
1029/* Count number of scatter-gather elements required */
1030static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
1031 unsigned int num_pages,
1032 unsigned int total_len)
1033{
1034 unsigned int i;
1035 unsigned int this_len;
1036
1037 for (i = 0; i < num_pages && total_len; i++) {
1038 this_len = min(page_descs[i].length, total_len);
1039 total_len -= this_len;
1040 }
1041
1042 return i;
1043}
1044
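/*
 * Illustrative example: a FUSE_READ reply that returns data in two pages
 * needs one sg element for fuse_in_header, one for the fuse_read_in
 * argument, one for fuse_out_header and two for the reply pages, i.e.
 * five elements in total.
 */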
1045/* Return the number of scatter-gather list elements required */
1046static unsigned int sg_count_fuse_req(struct fuse_req *req)
1047{
1048 struct fuse_args *args = req->args;
1049 struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
1050 unsigned int size, total_sgs = 1 /* fuse_in_header */;
1051
1052 if (args->in_numargs - args->in_pages)
1053 total_sgs += 1;
1054
1055 if (args->in_pages) {
1056 size = args->in_args[args->in_numargs - 1].size;
1057 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
1058 size);
1059 }
1060
1061 if (!test_bit(FR_ISREPLY, &req->flags))
1062 return total_sgs;
1063
1064 total_sgs += 1 /* fuse_out_header */;
1065
1066 if (args->out_numargs - args->out_pages)
1067 total_sgs += 1;
1068
1069 if (args->out_pages) {
1070 size = args->out_args[args->out_numargs - 1].size;
1071 total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
1072 size);
1073 }
1074
1075 return total_sgs;
1076}
1077
1078/* Add pages to scatter-gather list and return number of elements used */
1079static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
1080 struct page **pages,
1081 struct fuse_page_desc *page_descs,
1082 unsigned int num_pages,
1083 unsigned int total_len)
1084{
1085 unsigned int i;
1086 unsigned int this_len;
1087
1088 for (i = 0; i < num_pages && total_len; i++) {
1089 sg_init_table(&sg[i], 1);
1090 this_len = min(page_descs[i].length, total_len);
1091 sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
1092 total_len -= this_len;
1093 }
1094
1095 return i;
1096}
1097
1098/* Add args to scatter-gather list and return number of elements used */
1099static unsigned int sg_init_fuse_args(struct scatterlist *sg,
1100 struct fuse_req *req,
1101 struct fuse_arg *args,
1102 unsigned int numargs,
1103 bool argpages,
1104 void *argbuf,
1105 unsigned int *len_used)
1106{
1107 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1108 unsigned int total_sgs = 0;
1109 unsigned int len;
1110
1111 len = fuse_len_args(numargs - argpages, args);
1112 if (len)
1113 sg_init_one(&sg[total_sgs++], argbuf, len);
1114
1115 if (argpages)
1116 total_sgs += sg_init_fuse_pages(&sg[total_sgs],
1117 ap->pages, ap->descs,
1118 ap->num_pages,
1119 args[numargs - 1].size);
1120
1121 if (len_used)
1122 *len_used = len;
1123
1124 return total_sgs;
1125}
1126
1127/* Add a request to a virtqueue and kick the device */
1128static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
1129 struct fuse_req *req, bool in_flight)
1130{
1131 /* requests need at least 4 elements */
1132 struct scatterlist *stack_sgs[6];
1133 struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
1134 struct scatterlist **sgs = stack_sgs;
1135 struct scatterlist *sg = stack_sg;
1136 struct virtqueue *vq;
1137 struct fuse_args *args = req->args;
1138 unsigned int argbuf_used = 0;
1139 unsigned int out_sgs = 0;
1140 unsigned int in_sgs = 0;
1141 unsigned int total_sgs;
1142 unsigned int i;
1143 int ret;
1144 bool notify;
1145 struct fuse_pqueue *fpq;
1146
1147 /* Does the sglist fit on the stack? */
1148 total_sgs = sg_count_fuse_req(req);
1149 if (total_sgs > ARRAY_SIZE(stack_sgs)) {
1150 sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
1151 sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
1152 if (!sgs || !sg) {
1153 ret = -ENOMEM;
1154 goto out;
1155 }
1156 }
1157
1158 /* Use a bounce buffer since stack args cannot be mapped */
1159 ret = copy_args_to_argbuf(req);
1160 if (ret < 0)
1161 goto out;
1162
1163 /* Request elements */
1164 sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
1165 out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
1166 (struct fuse_arg *)args->in_args,
1167 args->in_numargs, args->in_pages,
1168 req->argbuf, &argbuf_used);
1169
1170 /* Reply elements */
1171 if (test_bit(FR_ISREPLY, &req->flags)) {
1172 sg_init_one(&sg[out_sgs + in_sgs++],
1173 &req->out.h, sizeof(req->out.h));
1174 in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
1175 args->out_args, args->out_numargs,
1176 args->out_pages,
1177 req->argbuf + argbuf_used, NULL);
1178 }
1179
1180 WARN_ON(out_sgs + in_sgs != total_sgs);
1181
1182 for (i = 0; i < total_sgs; i++)
1183 sgs[i] = &sg[i];
1184
1185 spin_lock(&fsvq->lock);
1186
1187 if (!fsvq->connected) {
1188 spin_unlock(&fsvq->lock);
1189 ret = -ENOTCONN;
1190 goto out;
1191 }
1192
1193 vq = fsvq->vq;
1194 ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
1195 if (ret < 0) {
1196 spin_unlock(&fsvq->lock);
1197 goto out;
1198 }
1199
1200 /* Request successfully sent. */
1201 fpq = &fsvq->fud->pq;
1202 spin_lock(&fpq->lock);
1203 list_add_tail(&req->list, fpq->processing);
1204 spin_unlock(&fpq->lock);
1205 set_bit(FR_SENT, &req->flags);
1206 /* matches barrier in request_wait_answer() */
1207 smp_mb__after_atomic();
1208
1209 if (!in_flight)
1210 inc_in_flight_req(fsvq);
1211 notify = virtqueue_kick_prepare(vq);
1212
1213 spin_unlock(&fsvq->lock);
1214
1215 if (notify)
1216 virtqueue_notify(vq);
1217
1218out:
1219 if (ret < 0 && req->argbuf) {
1220 kfree(req->argbuf);
1221 req->argbuf = NULL;
1222 }
1223 if (sgs != stack_sgs) {
1224 kfree(sgs);
1225 kfree(sg);
1226 }
1227
1228 return ret;
1229}
1230
1231static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
1232__releases(fiq->lock)
1233{
1234 unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
1235 struct virtio_fs *fs;
1236 struct fuse_req *req;
1237 struct virtio_fs_vq *fsvq;
1238 int ret;
1239
1240 WARN_ON(list_empty(&fiq->pending));
1241 req = list_last_entry(&fiq->pending, struct fuse_req, list);
1242 clear_bit(FR_PENDING, &req->flags);
1243 list_del_init(&req->list);
1244 WARN_ON(!list_empty(&fiq->pending));
1245 spin_unlock(&fiq->lock);
1246
1247 fs = fiq->priv;
1248
1249 pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
1250 __func__, req->in.h.opcode, req->in.h.unique,
1251 req->in.h.nodeid, req->in.h.len,
1252 fuse_len_args(req->args->out_numargs, req->args->out_args));
1253
1254 fsvq = &fs->vqs[queue_id];
1255 ret = virtio_fs_enqueue_req(fsvq, req, false);
1256 if (ret < 0) {
1257 if (ret == -ENOMEM || ret == -ENOSPC) {
1258 /*
1259 * Virtqueue full. Retry submission from worker
1260 * context as we might be holding fc->bg_lock.
1261 */
1262 spin_lock(&fsvq->lock);
1263 list_add_tail(&req->list, &fsvq->queued_reqs);
1264 inc_in_flight_req(fsvq);
1265 schedule_delayed_work(&fsvq->dispatch_work,
1266 msecs_to_jiffies(1));
1267 spin_unlock(&fsvq->lock);
1268 return;
1269 }
1270 req->out.h.error = ret;
1271 pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
1272
1273 /* Can't end request in submission context. Use a worker */
1274 spin_lock(&fsvq->lock);
1275 list_add_tail(&req->list, &fsvq->end_reqs);
1276 schedule_delayed_work(&fsvq->dispatch_work, 0);
1277 spin_unlock(&fsvq->lock);
1278 return;
1279 }
1280}
1281
1282static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
1283 .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
1284 .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
1285 .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
1286 .release = virtio_fs_fiq_release,
1287};
1288
1289static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
1290{
1291 ctx->rootmode = S_IFDIR;
1292 ctx->default_permissions = 1;
1293 ctx->allow_other = 1;
1294 ctx->max_read = UINT_MAX;
1295 ctx->blksize = 512;
1296 ctx->destroy = true;
1297 ctx->no_control = true;
1298 ctx->no_force_umount = true;
1299}
1300
1301static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
1302{
1303 struct fuse_mount *fm = get_fuse_mount_super(sb);
1304 struct fuse_conn *fc = fm->fc;
1305 struct virtio_fs *fs = fc->iq.priv;
1306 struct fuse_fs_context *ctx = fsc->fs_private;
1307 unsigned int i;
1308 int err;
1309
1310 virtio_fs_ctx_set_defaults(ctx);
1311 mutex_lock(&virtio_fs_mutex);
1312
1313 /* After holding mutex, make sure virtiofs device is still there.
1314 * Though we are holding a reference to it, the driver's ->remove might
1315 * still have cleaned up virtual queues. In that case bail out.
1316 */
1317 err = -EINVAL;
1318 if (list_empty(&fs->list)) {
1319 pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
1320 goto err;
1321 }
1322
1323 err = -ENOMEM;
1324 /* Allocate a fuse_dev for the hiprio and all request queues */
1325 for (i = 0; i < fs->nvqs; i++) {
1326 struct virtio_fs_vq *fsvq = &fs->vqs[i];
1327
1328 fsvq->fud = fuse_dev_alloc();
1329 if (!fsvq->fud)
1330 goto err_free_fuse_devs;
1331 }
1332
1333 /* virtiofs allocates and installs its own fuse devices */
1334 ctx->fudptr = NULL;
1335 if (ctx->dax_mode != FUSE_DAX_NEVER) {
1336 if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
1337 err = -EINVAL;
1338 pr_err("virtio-fs: dax can't be enabled as the filesystem"
1339 " device does not support it.\n");
1340 goto err_free_fuse_devs;
1341 }
1342 ctx->dax_dev = fs->dax_dev;
1343 }
1344 err = fuse_fill_super_common(sb, ctx);
1345 if (err < 0)
1346 goto err_free_fuse_devs;
1347
1348 for (i = 0; i < fs->nvqs; i++) {
1349 struct virtio_fs_vq *fsvq = &fs->vqs[i];
1350
1351 fuse_dev_install(fsvq->fud, fc);
1352 }
1353
1354 /* Previous unmount will stop all queues. Start these again */
1355 virtio_fs_start_all_queues(fs);
1356 fuse_send_init(fm);
1357 mutex_unlock(&virtio_fs_mutex);
1358 return 0;
1359
1360err_free_fuse_devs:
1361 virtio_fs_free_devs(fs);
1362err:
1363 mutex_unlock(&virtio_fs_mutex);
1364 return err;
1365}
1366
1367static void virtio_fs_conn_destroy(struct fuse_mount *fm)
1368{
1369 struct fuse_conn *fc = fm->fc;
1370 struct virtio_fs *vfs = fc->iq.priv;
1371 struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];
1372
1373 /* Stop dax worker. Soon evict_inodes() will be called which
1374 * will free all memory ranges belonging to all inodes.
1375 */
1376 if (IS_ENABLED(CONFIG_FUSE_DAX))
1377 fuse_dax_cancel_work(fc);
1378
1379 /* Stop forget queue. Soon destroy will be sent */
1380 spin_lock(&fsvq->lock);
1381 fsvq->connected = false;
1382 spin_unlock(&fsvq->lock);
1383 virtio_fs_drain_all_queues(vfs);
1384
1385 fuse_conn_destroy(fm);
1386
1387 /* fuse_conn_destroy() must have sent destroy. Stop all queues
1388 * and drain one more time and free fuse devices. Freeing fuse
1389 * devices will drop their reference on fuse_conn and that in
1390 * turn will drop its reference on virtio_fs object.
1391 */
1392 virtio_fs_stop_all_queues(vfs);
1393 virtio_fs_drain_all_queues(vfs);
1394 virtio_fs_free_devs(vfs);
1395}
1396
1397static void virtio_kill_sb(struct super_block *sb)
1398{
1399 struct fuse_mount *fm = get_fuse_mount_super(sb);
1400 bool last;
1401
1402 /* If mount failed, we can still be called without any fc */
1403 if (sb->s_root) {
1404 last = fuse_mount_remove(fm);
1405 if (last)
1406 virtio_fs_conn_destroy(fm);
1407 }
1408 kill_anon_super(sb);
1409 fuse_mount_destroy(fm);
1410}
1411
1412static int virtio_fs_test_super(struct super_block *sb,
1413 struct fs_context *fsc)
1414{
1415 struct fuse_mount *fsc_fm = fsc->s_fs_info;
1416 struct fuse_mount *sb_fm = get_fuse_mount_super(sb);
1417
1418 return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
1419}
1420
1421static int virtio_fs_get_tree(struct fs_context *fsc)
1422{
1423 struct virtio_fs *fs;
1424 struct super_block *sb;
1425 struct fuse_conn *fc = NULL;
1426 struct fuse_mount *fm;
1427 unsigned int virtqueue_size;
1428 int err = -EIO;
1429
1430 /* This gets a reference on virtio_fs object. This ptr gets installed
1431 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
1432 * to drop the reference to this object.
1433 */
1434 fs = virtio_fs_find_instance(fsc->source);
1435 if (!fs) {
1436 pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
1437 return -EINVAL;
1438 }
1439
1440 virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
1441 if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
1442 goto out_err;
1443
1444 err = -ENOMEM;
1445 fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
1446 if (!fc)
1447 goto out_err;
1448
1449 fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
1450 if (!fm)
1451 goto out_err;
1452
1453 fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
1454 fc->release = fuse_free_conn;
1455 fc->delete_stale = true;
1456 fc->auto_submounts = true;
1457 fc->sync_fs = true;
1458
1459 /* Tell FUSE to split requests that exceed the virtqueue's size */
1460 fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
1461 virtqueue_size - FUSE_HEADER_OVERHEAD);
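	/*
	 * E.g. with a 128-descriptor request queue, max_pages_limit is capped
	 * at 124 so the header/argument elements counted by
	 * FUSE_HEADER_OVERHEAD always fit alongside the data pages.
	 */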
1462
1463 fsc->s_fs_info = fm;
1464 sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
1465 if (fsc->s_fs_info)
1466 fuse_mount_destroy(fm);
1467 if (IS_ERR(sb))
1468 return PTR_ERR(sb);
1469
1470 if (!sb->s_root) {
1471 err = virtio_fs_fill_super(sb, fsc);
1472 if (err) {
1473 deactivate_locked_super(sb);
1474 return err;
1475 }
1476
1477 sb->s_flags |= SB_ACTIVE;
1478 }
1479
1480 WARN_ON(fsc->root);
1481 fsc->root = dget(sb->s_root);
1482 return 0;
1483
1484out_err:
1485 kfree(fc);
1486 mutex_lock(&virtio_fs_mutex);
1487 virtio_fs_put(fs);
1488 mutex_unlock(&virtio_fs_mutex);
1489 return err;
1490}
1491
1492static const struct fs_context_operations virtio_fs_context_ops = {
1493 .free = virtio_fs_free_fsc,
1494 .parse_param = virtio_fs_parse_param,
1495 .get_tree = virtio_fs_get_tree,
1496};
1497
1498static int virtio_fs_init_fs_context(struct fs_context *fsc)
1499{
1500 struct fuse_fs_context *ctx;
1501
1502 if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
1503 return fuse_init_fs_context_submount(fsc);
1504
1505 ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
1506 if (!ctx)
1507 return -ENOMEM;
1508 fsc->fs_private = ctx;
1509 fsc->ops = &virtio_fs_context_ops;
1510 return 0;
1511}
1512
1513static struct file_system_type virtio_fs_type = {
1514 .owner = THIS_MODULE,
1515 .name = "virtiofs",
1516 .init_fs_context = virtio_fs_init_fs_context,
1517 .kill_sb = virtio_kill_sb,
1518};
1519
1520static int __init virtio_fs_init(void)
1521{
1522 int ret;
1523
1524 ret = register_virtio_driver(&virtio_fs_driver);
1525 if (ret < 0)
1526 return ret;
1527
1528 ret = register_filesystem(&virtio_fs_type);
1529 if (ret < 0) {
1530 unregister_virtio_driver(&virtio_fs_driver);
1531 return ret;
1532 }
1533
1534 return 0;
1535}
1536module_init(virtio_fs_init);
1537
1538static void __exit virtio_fs_exit(void)
1539{
1540 unregister_filesystem(&virtio_fs_type);
1541 unregister_virtio_driver(&virtio_fs_driver);
1542}
1543module_exit(virtio_fs_exit);
1544
1545MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
1546MODULE_DESCRIPTION("Virtio Filesystem");
1547MODULE_LICENSE("GPL");
1548MODULE_ALIAS_FS(KBUILD_MODNAME);
1549MODULE_DEVICE_TABLE(virtio, id_table);