fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP
[linux-2.6-block.git] / fs/fuse/iomode.c
// SPDX-License-Identifier: GPL-2.0
/*
 * FUSE inode io modes.
 *
 * Copyright (c) 2024 CTERA Networks.
 */

#include "fuse_i.h"

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>

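/*
 * Sign convention for fi->iocachectr, maintained by the helpers below:
 *  > 0 - inode is in caching io mode; counts files opened in cached io mode
 *  < 0 - inode is in uncached io mode; counts files opened for parallel dio
 * == 0 - no io mode has been established; either mode may be entered
 */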
/*
 * Return true if a new open in caching mode needs to wait for in-progress
 * parallel dio writes to complete.
 */
static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
{
	return READ_ONCE(fi->iocachectr) < 0;
}

/*
 * Start cached io mode.
 *
 * Blocks new parallel dio writes and waits for the in-progress parallel dio
 * writes to complete.
 */
int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* There are no io modes if the server does not implement open */
	if (!ff->release_args)
		return 0;

	spin_lock(&fi->lock);
	/*
	 * Setting the bit advises new direct-io writes to use an exclusive
	 * lock - without it the wait below could block forever.
	 */
	while (fuse_is_io_cache_wait(fi)) {
		set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		spin_unlock(&fi->lock);
		wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
		spin_lock(&fi->lock);
	}
	WARN_ON(ff->iomode == IOM_UNCACHED);
	if (ff->iomode == IOM_NONE) {
		ff->iomode = IOM_CACHED;
		if (fi->iocachectr == 0)
			set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
		fi->iocachectr++;
	}
	spin_unlock(&fi->lock);
	return 0;
}

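/*
 * End cached io mode.
 *
 * Drops the iocachectr reference taken by fuse_file_cached_io_start() and
 * clears the FUSE_I_CACHE_IO_MODE bit when the last cached file is closed.
 */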
static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	WARN_ON(fi->iocachectr <= 0);
	WARN_ON(ff->iomode != IOM_CACHED);
	ff->iomode = IOM_NONE;
	fi->iocachectr--;
	if (fi->iocachectr == 0)
		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
	spin_unlock(&fi->lock);
}

/* Start strictly uncached io mode where cache access is not allowed */
int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;

	spin_lock(&fi->lock);
	if (fi->iocachectr > 0) {
		err = -ETXTBSY;
		goto unlock;
	}
	WARN_ON(ff->iomode != IOM_NONE);
	fi->iocachectr--;
	ff->iomode = IOM_UNCACHED;
unlock:
	spin_unlock(&fi->lock);
	return err;
}

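/*
 * End uncached io mode.
 *
 * Drops the parallel dio reference taken by fuse_file_uncached_io_start() and
 * wakes up opens waiting to enter caching mode once the last uncached file is
 * released.
 */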
void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	WARN_ON(fi->iocachectr >= 0);
	WARN_ON(ff->iomode != IOM_UNCACHED);
	ff->iomode = IOM_NONE;
	fi->iocachectr++;
	if (!fi->iocachectr)
		wake_up(&fi->direct_io_waitq);
	spin_unlock(&fi->lock);
}

/* Request access to submit new io to inode via open file */
int fuse_file_io_open(struct file *file, struct inode *inode)
{
	struct fuse_file *ff = file->private_data;
	int err;

	/*
	 * io modes are not relevant with DAX and with a server that does not
	 * implement open.
	 */
	if (FUSE_IS_DAX(inode) || !ff->release_args)
		return 0;

	/*
	 * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
	 */
	if (!(ff->open_flags & FOPEN_DIRECT_IO))
		ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;

	/*
	 * First caching file open enters caching inode io mode.
	 *
	 * Note that if a user opens a file with O_DIRECT, but the server did
	 * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
	 * so we put the inode in caching mode to prevent parallel dio.
	 */
	if (ff->open_flags & FOPEN_DIRECT_IO)
		return 0;

	err = fuse_file_cached_io_start(inode, ff);
	if (err)
		goto fail;

	return 0;

fail:
	pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
		 ff->open_flags, err);
	/*
	 * The file open mode determines the inode io mode.
	 * Using an incorrect open mode is a server mistake, which results in a
	 * user-visible failure of open() with an EIO error.
	 */
	return -EIO;
}

/* No more pending io and no new io possible to inode via open/mmapped file */
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
{
	/*
	 * Last parallel dio close allows caching inode io mode.
	 * Last caching file close exits caching inode io mode.
	 */
	switch (ff->iomode) {
	case IOM_NONE:
		/* Nothing to do */
		break;
	case IOM_UNCACHED:
		fuse_file_uncached_io_end(inode, ff);
		break;
	case IOM_CACHED:
		fuse_file_cached_io_end(inode, ff);
		break;
	}
}