Commit | Line | Data |
---|---|---|
c8383054 JX |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | #include <linux/fdtable.h> | |
3 | #include <linux/anon_inodes.h> | |
4 | #include <linux/uio.h> | |
5 | #include "internal.h" | |
6 | ||
de3e26f9 BL |
| | /* Drop a reference to @req; the request is freed on the final put. */ |
7 | static inline void cachefiles_req_put(struct cachefiles_req *req) |
8 | { | |
9 | if (refcount_dec_and_test(&req->ref)) | |
10 | kfree(req); | |
11 | } | |
12 | ||
c8383054 JX |
| | /* |
| | * ->release() of the anonymous fd handed to the daemon for an object. |
| | * Marks the object closed (under info->lock, so it cannot race with copen |
| | * setting it open) and completes any still-queued CLOSE requests for this |
| | * object id so their waiters are not left hanging. |
| | */ |
13 | static int cachefiles_ondemand_fd_release(struct inode *inode, |
14 | struct file *file) | |
15 | { | |
16 | struct cachefiles_object *object = file->private_data; | |
4988e35e BL |
17 | struct cachefiles_cache *cache; |
18 | struct cachefiles_ondemand_info *info; | |
0a790040 | 19 | int object_id; |
9032b6e8 | 20 | struct cachefiles_req *req; |
4988e35e BL |
21 | XA_STATE(xas, NULL, 0); |
22 | ||
| | /* private_data was cleared by cachefiles_ondemand_get_fd() on the -EEXIST path. */ |
23 | if (!object) | |
24 | return 0; | |
25 | ||
26 | info = object->ondemand; | |
27 | cache = object->volume->cache; | |
28 | xas.xa = &cache->reqs; | |
c8383054 | 29 | |
9032b6e8 | 30 | xa_lock(&cache->reqs); |
0a790040 BL |
31 | spin_lock(&info->lock); |
32 | object_id = info->ondemand_id; | |
3c5ecfe1 | 33 | info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; |
357a18d0 | 34 | cachefiles_ondemand_set_object_close(object); |
0a790040 | 35 | spin_unlock(&info->lock); |
9032b6e8 | 36 | |
0a7e54c1 JZ |
37 | /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */ |
38 | xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { | |
65aa5f6f | 39 | if (req->msg.object_id == object_id && |
0a7e54c1 | 40 | req->msg.opcode == CACHEFILES_OP_CLOSE) { |
9032b6e8 JX |
41 | complete(&req->done); |
42 | xas_store(&xas, NULL); | |
43 | } | |
44 | } | |
45 | xa_unlock(&cache->reqs); | |
46 | ||
47 | xa_erase(&cache->ondemand_ids, object_id); |
1519670e | 48 | trace_cachefiles_ondemand_fd_release(object, object_id); |
c8383054 | 49 | cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); |
d11b0b04 | 50 | cachefiles_put_unbind_pincount(cache); |
c8383054 JX |
51 | return 0; |
52 | } | |
53 | ||
| | /* |
| | * ->write_iter() on the anonymous fd: the daemon writes fetched data into |
| | * the backing cache file at kiocb->ki_pos. Space is reserved/prepared under |
| | * the cache's security context before the actual write. |
| | */ |
54 | static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, | |
55 | struct iov_iter *iter) | |
56 | { | |
57 | struct cachefiles_object *object = kiocb->ki_filp->private_data; | |
58 | struct cachefiles_cache *cache = object->volume->cache; | |
59 | struct file *file = object->file; | |
60 | size_t len = iter->count; | |
61 | loff_t pos = kiocb->ki_pos; | |
62 | const struct cred *saved_cred; | |
63 | int ret; | |
64 | ||
65 | if (!file) | |
66 | return -ENOBUFS; | |
67 | ||
68 | cachefiles_begin_secure(cache, &saved_cred); | |
e0ace6ca | 69 | ret = __cachefiles_prepare_write(object, file, &pos, &len, len, true); |
c8383054 JX |
70 | cachefiles_end_secure(cache, saved_cred); |
71 | if (ret < 0) | |
72 | return ret; | |
73 | ||
1519670e | 74 | trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len); |
c8383054 JX |
75 | ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); |
| | /* __cachefiles_write() returns 0 on success here; report bytes written. */ |
76 | if (!ret) | |
77 | ret = len; | |
78 | ||
79 | return ret; | |
80 | } | |
81 | ||
| | /* ->llseek() on the anonymous fd: forwarded to the backing cache file. */ |
82 | static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, | |
83 | int whence) | |
84 | { | |
85 | struct cachefiles_object *object = filp->private_data; | |
86 | struct file *file = object->file; | |
87 | ||
88 | if (!file) | |
89 | return -ENOBUFS; | |
90 | ||
91 | return vfs_llseek(file, pos, whence); | |
92 | } | |
93 | ||
| | /* |
| | * CACHEFILES_IOC_READ_COMPLETE ("cread") from the daemon: @id is the |
| | * msg_id of a READ request. The request is validated (opcode and owning |
| | * object) and erased under xa_lock before its waiter is completed, so a |
| | * stale or forged id cannot complete someone else's request. |
| | */ |
9032b6e8 | 94 | static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, |
a26dc49d | 95 | unsigned long id) |
9032b6e8 JX |
96 | { |
97 | struct cachefiles_object *object = filp->private_data; | |
98 | struct cachefiles_cache *cache = object->volume->cache; | |
99 | struct cachefiles_req *req; | |
a26dc49d | 100 | XA_STATE(xas, &cache->reqs, id); |
9032b6e8 JX |
101 | |
102 | if (ioctl != CACHEFILES_IOC_READ_COMPLETE) | |
103 | return -EINVAL; | |
104 | ||
105 | if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) | |
106 | return -EOPNOTSUPP; | |
107 | ||
a26dc49d BL |
108 | xa_lock(&cache->reqs); |
109 | req = xas_load(&xas); | |
110 | if (!req || req->msg.opcode != CACHEFILES_OP_READ || | |
111 | req->object != object) { | |
112 | xa_unlock(&cache->reqs); | |
9032b6e8 | 113 | return -EINVAL; |
a26dc49d BL |
114 | } |
115 | xas_store(&xas, NULL); | |
116 | xa_unlock(&cache->reqs); | |
9032b6e8 | 117 | |
1519670e | 118 | trace_cachefiles_ondemand_cread(object, id); |
9032b6e8 JX |
119 | complete(&req->done); |
120 | return 0; | |
121 | } | |
122 | ||
c8383054 JX |
| | /* file_operations of the anonymous fd passed to the user daemon via OPEN. */ |
123 | static const struct file_operations cachefiles_ondemand_fd_fops = { |
124 | .owner = THIS_MODULE, | |
125 | .release = cachefiles_ondemand_fd_release, | |
126 | .write_iter = cachefiles_ondemand_fd_write_iter, | |
127 | .llseek = cachefiles_ondemand_fd_llseek, | |
9032b6e8 | 128 | .unlocked_ioctl = cachefiles_ondemand_fd_ioctl, |
c8383054 JX |
129 | }; |
130 | ||
131 | /* | |
132 | * OPEN request Completion (copen) | |
133 | * - command: "copen <id>,<cache_size>" | |
134 | * <cache_size> indicates the object size if >=0, error code if negative | |
135 | */ | |
136 | int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) | |
137 | { | |
138 | struct cachefiles_req *req; | |
139 | struct fscache_cookie *cookie; | |
0a790040 | 140 | struct cachefiles_ondemand_info *info; |
c8383054 JX |
141 | char *pid, *psize; |
142 | unsigned long id; | |
143 | long size; | |
144 | int ret; | |
a26dc49d | 145 | XA_STATE(xas, &cache->reqs, 0); |
c8383054 JX |
146 | |
147 | if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) | |
148 | return -EOPNOTSUPP; | |
149 | ||
150 | if (!*args) { | |
151 | pr_err("Empty id specified\n"); | |
152 | return -EINVAL; | |
153 | } | |
154 | ||
| | /* Split "id,size" in place at the comma. */ |
155 | pid = args; | |
156 | psize = strchr(args, ','); | |
157 | if (!psize) { | |
158 | pr_err("Cache size is not specified\n"); | |
159 | return -EINVAL; | |
160 | } | |
161 | ||
162 | *psize = 0; | |
163 | psize++; | |
164 | ||
165 | ret = kstrtoul(pid, 0, &id); | |
166 | if (ret) | |
167 | return ret; | |
168 | ||
| | /* Look up and detach the OPEN request atomically; reject forged ids. */ |
a26dc49d BL |
169 | xa_lock(&cache->reqs); |
170 | xas.xa_index = id; | |
171 | req = xas_load(&xas); | |
172 | if (!req || req->msg.opcode != CACHEFILES_OP_OPEN || | |
173 | !req->object->ondemand->ondemand_id) { | |
174 | xa_unlock(&cache->reqs); | |
c8383054 | 175 | return -EINVAL; |
a26dc49d BL |
176 | } |
177 | xas_store(&xas, NULL); | |
178 | xa_unlock(&cache->reqs); | |
c8383054 JX |
179 | |
180 | /* fail OPEN request if copen format is invalid */ | |
181 | ret = kstrtol(psize, 0, &size); | |
182 | if (ret) { | |
183 | req->error = ret; | |
184 | goto out; | |
185 | } | |
186 | ||
187 | /* fail OPEN request if daemon reports an error */ | |
188 | if (size < 0) { | |
c93ccd63 SK |
189 | if (!IS_ERR_VALUE(size)) { |
190 | req->error = -EINVAL; | |
191 | ret = -EINVAL; | |
192 | } else { | |
193 | req->error = size; | |
194 | ret = 0; | |
195 | } | |
c8383054 JX |
196 | goto out; |
197 | } | |
198 | ||
0a790040 BL |
199 | info = req->object->ondemand; |
200 | spin_lock(&info->lock); | |
201 | /* | |
202 | * The anonymous fd was closed before copen ? Fail the request. | |
203 | * | |
204 | * t1 | t2 | |
205 | * --------------------------------------------------------- | |
206 | * cachefiles_ondemand_copen | |
207 | * req = xa_erase(&cache->reqs, id) | |
208 | * // Anon fd is maliciously closed. | |
209 | * cachefiles_ondemand_fd_release | |
210 | * xa_lock(&cache->reqs) | |
211 | * cachefiles_ondemand_set_object_close(object) | |
212 | * xa_unlock(&cache->reqs) | |
213 | * cachefiles_ondemand_set_object_open | |
214 | * // No one will ever close it again. | |
215 | * cachefiles_ondemand_daemon_read | |
216 | * cachefiles_ondemand_select_req | |
217 | * | |
218 | * Get a read req but its fd is already closed. The daemon can't | |
219 | * issue a cread ioctl with a closed fd, and would hang. | |
220 | */ | |
221 | if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) { | |
222 | spin_unlock(&info->lock); | |
223 | req->error = -EBADFD; | |
224 | goto out; | |
225 | } | |
c8383054 JX |
226 | cookie = req->object->cookie; |
227 | cookie->object_size = size; | |
228 | if (size) | |
229 | clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); | |
230 | else | |
231 | set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); | |
1519670e | 232 | trace_cachefiles_ondemand_copen(req->object, id, size); |
c8383054 | 233 | |
357a18d0 | 234 | cachefiles_ondemand_set_object_open(req->object); |
0a790040 | 235 | spin_unlock(&info->lock); |
0a7e54c1 | 236 | wake_up_all(&cache->daemon_pollwq); |
357a18d0 | 237 | |
c8383054 JX |
238 | out: |
239 | complete(&req->done); | |
240 | return ret; | |
241 | } | |
242 | ||
e73fa11a JZ |
| | /* "restore" command: requeue all in-flight requests after a daemon restart. */ |
243 | int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) |
244 | { | |
245 | struct cachefiles_req *req; | |
246 | ||
247 | XA_STATE(xas, &cache->reqs, 0); | |
248 | ||
249 | if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) | |
250 | return -EOPNOTSUPP; | |
251 | ||
252 | /* | |
253 | * Reset the requests to CACHEFILES_REQ_NEW state, so that | |
254 | * requests that were processed halfway before the user daemon | |
255 | * crashed can be reprocessed after recovery. | |
256 | */ | |
257 | xas_lock(&xas); | |
258 | xas_for_each(&xas, req, ULONG_MAX) | |
259 | xas_set_mark(&xas, CACHEFILES_REQ_NEW); | |
260 | xas_unlock(&xas); | |
261 | ||
262 | wake_up_all(&cache->daemon_pollwq); | |
263 | return 0; | |
264 | } | |
265 | ||
c8383054 JX |
| | /* |
| | * Allocate an object id and an anonymous fd for an OPEN request and stash |
| | * the fd in the request message for the daemon. On any failure the object |
| | * is put back to close state (unless a concurrent open already succeeded). |
| | */ |
266 | static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) |
267 | { | |
268 | struct cachefiles_object *object; | |
269 | struct cachefiles_cache *cache; | |
270 | struct cachefiles_open *load; | |
271 | struct file *file; | |
272 | u32 object_id; | |
273 | int ret, fd; | |
274 | ||
275 | object = cachefiles_grab_object(req->object, | |
276 | cachefiles_obj_get_ondemand_fd); | |
277 | cache = object->volume->cache; | |
278 | ||
279 | ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL, | |
280 | XA_LIMIT(1, INT_MAX), | |
281 | &cache->ondemand_id_next, GFP_KERNEL); | |
282 | if (ret < 0) | |
283 | goto err; | |
284 | ||
285 | fd = get_unused_fd_flags(O_WRONLY); | |
286 | if (fd < 0) { | |
287 | ret = fd; | |
288 | goto err_free_id; | |
289 | } | |
290 | ||
291 | file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops, | |
292 | object, O_WRONLY); | |
293 | if (IS_ERR(file)) { | |
294 | ret = PTR_ERR(file); | |
295 | goto err_put_fd; | |
296 | } | |
297 | ||
| | /* A reopened request raced with us and already installed an fd. */ |
4988e35e BL |
298 | spin_lock(&object->ondemand->lock); |
299 | if (object->ondemand->ondemand_id > 0) { | |
300 | spin_unlock(&object->ondemand->lock); | |
301 | /* Pair with check in cachefiles_ondemand_fd_release(). */ | |
302 | file->private_data = NULL; | |
303 | ret = -EEXIST; | |
304 | goto err_put_file; | |
305 | } | |
306 | ||
c8383054 JX |
307 | file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; |
308 | fd_install(fd, file); | |
309 | ||
310 | load = (void *)req->msg.data; | |
311 | load->fd = fd; | |
3c5ecfe1 | 312 | object->ondemand->ondemand_id = object_id; |
4988e35e | 313 | spin_unlock(&object->ondemand->lock); |
d11b0b04 JX |
314 | |
315 | cachefiles_get_unbind_pincount(cache); | |
1519670e | 316 | trace_cachefiles_ondemand_open(object, &req->msg, load); |
c8383054 JX |
317 | return 0; |
318 | ||
4988e35e BL |
319 | err_put_file: |
320 | fput(file); | |
c8383054 JX |
321 | err_put_fd: |
322 | put_unused_fd(fd); | |
323 | err_free_id: | |
324 | xa_erase(&cache->ondemand_ids, object_id); | |
325 | err: | |
4988e35e BL |
326 | spin_lock(&object->ondemand->lock); |
327 | /* Avoid marking an opened object as closed. */ | |
328 | if (object->ondemand->ondemand_id <= 0) | |
329 | cachefiles_ondemand_set_object_close(object); | |
330 | spin_unlock(&object->ondemand->lock); | |
c8383054 JX |
331 | cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); |
332 | return ret; | |
333 | } | |
334 | ||
0a7e54c1 JZ |
| | /* Work item: re-send an OPEN request to reopen a closed object. */ |
335 | static void ondemand_object_worker(struct work_struct *work) |
336 | { | |
337 | struct cachefiles_ondemand_info *info = | |
338 | container_of(work, struct cachefiles_ondemand_info, ondemand_work); | |
339 | ||
340 | cachefiles_ondemand_init_object(info->object); | |
341 | } | |
342 | ||
343 | /* | |
344 | * If there are any inflight or subsequent READ requests on the | |
345 | * closed object, reopen it. | |
346 | * Skip read requests whose related object is reopening. | |
347 | */ | |
| | /* Caller holds xa_lock(&cache->reqs); scans CACHEFILES_REQ_NEW entries up to @xa_max. */ |
348 | static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas, | |
349 | unsigned long xa_max) | |
350 | { | |
351 | struct cachefiles_req *req; | |
352 | struct cachefiles_object *object; | |
353 | struct cachefiles_ondemand_info *info; | |
354 | ||
355 | xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) { | |
| | /* Non-READ requests (OPEN/CLOSE) are always eligible. */ |
356 | if (req->msg.opcode != CACHEFILES_OP_READ) | |
357 | return req; | |
358 | object = req->object; | |
359 | info = object->ondemand; | |
360 | if (cachefiles_ondemand_object_is_close(object)) { | |
361 | cachefiles_ondemand_set_object_reopening(object); | |
362 | queue_work(fscache_wq, &info->ondemand_work); | |
363 | continue; | |
364 | } | |
365 | if (cachefiles_ondemand_object_is_reopening(object)) | |
366 | continue; | |
367 | return req; | |
368 | } | |
369 | return NULL; | |
370 | } | |
371 | ||
c8383054 JX |
| | /* |
| | * Hand the next pending request to the user daemon reading the device file. |
| | * Copies the request message to @_buffer and, for OPEN, installs the anon fd |
| | * first. Request and object refs are taken under xa_lock so neither can be |
| | * freed while the copy is in progress. |
| | */ |
372 | ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, |
373 | char __user *_buffer, size_t buflen) | |
374 | { | |
375 | struct cachefiles_req *req; | |
376 | struct cachefiles_msg *msg; | |
c8383054 JX |
377 | size_t n; |
378 | int ret = 0; | |
1122f400 | 379 | XA_STATE(xas, &cache->reqs, cache->req_id_next); |
c8383054 | 380 | |
0a7e54c1 | 381 | xa_lock(&cache->reqs); |
c8383054 | 382 | /* |
1122f400 XY |
383 | * Cyclically search for a request that has not ever been processed, |
384 | * to prevent requests from being processed repeatedly, and make | |
385 | * request distribution fair. | |
c8383054 | 386 | */ |
0a7e54c1 | 387 | req = cachefiles_ondemand_select_req(&xas, ULONG_MAX); |
1122f400 XY |
388 | if (!req && cache->req_id_next > 0) { |
389 | xas_set(&xas, 0); | |
0a7e54c1 | 390 | req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1); |
1122f400 | 391 | } |
c8383054 JX |
392 | if (!req) { |
393 | xa_unlock(&cache->reqs); | |
394 | return 0; | |
395 | } | |
396 | ||
397 | msg = &req->msg; | |
398 | n = msg->len; | |
399 | ||
400 | if (n > buflen) { | |
401 | xa_unlock(&cache->reqs); | |
402 | return -EMSGSIZE; | |
403 | } | |
404 | ||
405 | xas_clear_mark(&xas, CACHEFILES_REQ_NEW); | |
1122f400 | 406 | cache->req_id_next = xas.xa_index + 1; |
de3e26f9 | 407 | refcount_inc(&req->ref); |
da4a8274 | 408 | cachefiles_grab_object(req->object, cachefiles_obj_get_read_req); |
c8383054 JX |
409 | xa_unlock(&cache->reqs); |
410 | ||
c8383054 JX |
411 | if (msg->opcode == CACHEFILES_OP_OPEN) { |
412 | ret = cachefiles_ondemand_get_fd(req); | |
4988e35e | 413 | if (ret) |
3e6d704f | 414 | goto out; |
c8383054 JX |
415 | } |
416 | ||
3e6d704f | 417 | msg->msg_id = xas.xa_index; |
0a7e54c1 JZ |
418 | msg->object_id = req->object->ondemand->ondemand_id; |
419 | ||
c8383054 JX |
420 | if (copy_to_user(_buffer, msg, n) != 0) { |
421 | ret = -EFAULT; | |
| | /* Undo the fd_install() so the daemon never sees a dangling fd. */ |
3e6d704f BL |
422 | if (msg->opcode == CACHEFILES_OP_OPEN) |
423 | close_fd(((struct cachefiles_open *)msg->data)->fd); | |
324b954a | 424 | } |
3e6d704f | 425 | out: |
da4a8274 | 426 | cachefiles_put_object(req->object, cachefiles_obj_put_read_req); |
3e6d704f BL |
427 | /* Remove error request and CLOSE request has no reply */ |
428 | if (ret || msg->opcode == CACHEFILES_OP_CLOSE) { | |
| | /* Re-check the slot: a restore/release may already have recycled it. */ |
429 | xas_reset(&xas); | |
430 | xas_lock(&xas); | |
431 | if (xas_load(&xas) == req) { | |
432 | req->error = ret; | |
433 | complete(&req->done); | |
434 | xas_store(&xas, NULL); | |
435 | } | |
436 | xas_unlock(&xas); | |
de3e26f9 | 437 | } |
de3e26f9 | 438 | cachefiles_req_put(req); |
3e6d704f | 439 | return ret ? ret : n; |
c8383054 JX |
440 | } |
441 | ||
| | /* Per-opcode callback that fills in the payload of a freshly allocated request. */ |
442 | typedef int (*init_req_fn)(struct cachefiles_req *req, void *private); | |
443 | ||
| | /* |
| | * Allocate a request, queue it in cache->reqs marked CACHEFILES_REQ_NEW, |
| | * wake the daemon and wait for completion. Returns req->error as set by |
| | * the completion path, or a negative errno if the request could not be |
| | * queued (cache dead, OOM, xarray full). |
| | */ |
444 | static int cachefiles_ondemand_send_req(struct cachefiles_object *object, | |
445 | enum cachefiles_opcode opcode, | |
446 | size_t data_len, | |
447 | init_req_fn init_req, | |
448 | void *private) | |
449 | { | |
450 | struct cachefiles_cache *cache = object->volume->cache; | |
0a7e54c1 | 451 | struct cachefiles_req *req = NULL; |
c8383054 JX |
452 | XA_STATE(xas, &cache->reqs, 0); |
453 | int ret; | |
454 | ||
455 | if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) | |
456 | return 0; | |
457 | ||
0a7e54c1 JZ |
458 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { |
459 | ret = -EIO; | |
460 | goto out; | |
461 | } | |
c8383054 JX |
462 | |
463 | req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); | |
0a7e54c1 JZ |
464 | if (!req) { |
465 | ret = -ENOMEM; | |
466 | goto out; | |
467 | } | |
c8383054 | 468 | |
de3e26f9 | 469 | refcount_set(&req->ref, 1); |
c8383054 JX |
470 | req->object = object; |
471 | init_completion(&req->done); | |
472 | req->msg.opcode = opcode; | |
473 | req->msg.len = sizeof(struct cachefiles_msg) + data_len; | |
474 | ||
475 | ret = init_req(req, private); | |
476 | if (ret) | |
477 | goto out; | |
478 | ||
479 | do { | |
480 | /* | |
481 | * Stop enqueuing the request when daemon is dying. The | |
482 | * following two operations need to be atomic as a whole. | |
483 | * 1) check cache state, and | |
484 | * 2) enqueue request if cache is alive. | |
485 | * Otherwise the request may be enqueued after xarray has been | |
486 | * flushed, leaving the orphan request never being completed. | |
487 | * | |
488 | * CPU 1 CPU 2 | |
489 | * ===== ===== | |
490 | * test CACHEFILES_DEAD bit | |
491 | * set CACHEFILES_DEAD bit | |
492 | * flush requests in the xarray | |
493 | * enqueue the request | |
494 | */ | |
495 | xas_lock(&xas); | |
496 | ||
497 | if (test_bit(CACHEFILES_DEAD, &cache->flags)) { | |
498 | xas_unlock(&xas); | |
499 | ret = -EIO; | |
500 | goto out; | |
501 | } | |
502 | ||
503 | /* coupled with the barrier in cachefiles_flush_reqs() */ | |
504 | smp_mb(); | |
505 | ||
| | /* The object was closed behind our back; a CLOSE would have no fd to match. */ |
0a7e54c1 | 506 | if (opcode == CACHEFILES_OP_CLOSE && |
357a18d0 | 507 | !cachefiles_ondemand_object_is_open(object)) { |
3c5ecfe1 | 508 | WARN_ON_ONCE(object->ondemand->ondemand_id == 0); |
324b954a JX |
509 | xas_unlock(&xas); |
510 | ret = -EIO; | |
511 | goto out; | |
512 | } | |
513 | ||
| | /* Pick the lowest free index as the request's msg_id. */ |
c8383054 JX |
514 | xas.xa_index = 0; |
515 | xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); | |
516 | if (xas.xa_node == XAS_RESTART) | |
517 | xas_set_err(&xas, -EBUSY); | |
518 | xas_store(&xas, req); | |
519 | xas_clear_mark(&xas, XA_FREE_MARK); | |
520 | xas_set_mark(&xas, CACHEFILES_REQ_NEW); | |
521 | xas_unlock(&xas); | |
522 | } while (xas_nomem(&xas, GFP_KERNEL)); | |
523 | ||
524 | ret = xas_error(&xas); | |
525 | if (ret) | |
526 | goto out; | |
527 | ||
528 | wake_up_all(&cache->daemon_pollwq); | |
529 | wait_for_completion(&req->done); | |
530 | ret = req->error; | |
de3e26f9 | 531 | cachefiles_req_put(req); |
0a7e54c1 | 532 | return ret; |
c8383054 | 533 | out: |
0a7e54c1 JZ |
534 | /* Reset the object to close state in error handling path. |
535 | * If error occurs after creating the anonymous fd, | |
536 | * cachefiles_ondemand_fd_release() will set object to close. | |
537 | */ | |
538 | if (opcode == CACHEFILES_OP_OPEN) | |
539 | cachefiles_ondemand_set_object_close(object); | |
c8383054 JX |
540 | kfree(req); |
541 | return ret; | |
542 | } | |
543 | ||
| | /* init_req_fn for OPEN: pack the volume key and cookie key into the message. */ |
544 | static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req, | |
545 | void *private) | |
546 | { | |
547 | struct cachefiles_object *object = req->object; | |
548 | struct fscache_cookie *cookie = object->cookie; | |
549 | struct fscache_volume *volume = object->volume->vcookie; | |
550 | struct cachefiles_open *load = (void *)req->msg.data; | |
551 | size_t volume_key_size, cookie_key_size; | |
552 | void *volume_key, *cookie_key; | |
553 | ||
554 | /* | |
555 | * Volume key is a NUL-terminated string. key[0] stores strlen() of the | |
556 | * string, followed by the content of the string (excluding '\0'). | |
557 | */ | |
558 | volume_key_size = volume->key[0] + 1; | |
559 | volume_key = volume->key + 1; | |
560 | ||
561 | /* Cookie key is binary data, which is netfs specific. */ | |
562 | cookie_key_size = cookie->key_len; | |
563 | cookie_key = fscache_get_key(cookie); | |
564 | ||
| | /* The daemon reports the object size via copen, so the netfs must want it. */ |
565 | if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) { | |
566 | pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n"); | |
567 | return -EINVAL; | |
568 | } | |
569 | ||
570 | load->volume_key_size = volume_key_size; | |
571 | load->cookie_key_size = cookie_key_size; | |
572 | memcpy(load->data, volume_key, volume_key_size); | |
573 | memcpy(load->data + volume_key_size, cookie_key, cookie_key_size); | |
574 | ||
575 | return 0; | |
576 | } | |
577 | ||
324b954a JX |
| | /* init_req_fn for CLOSE: nothing to pack; refuse if the object is not open. */ |
578 | static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, |
579 | void *private) | |
580 | { | |
581 | struct cachefiles_object *object = req->object; | |
324b954a | 582 | |
357a18d0 | 583 | if (!cachefiles_ondemand_object_is_open(object)) |
324b954a JX |
584 | return -ENOENT; |
585 | ||
1519670e | 586 | trace_cachefiles_ondemand_close(object, &req->msg); |
324b954a JX |
587 | return 0; |
588 | } | |
589 | ||
9032b6e8 JX |
| | /* Byte range of a READ request, passed through to the init_req_fn. */ |
590 | struct cachefiles_read_ctx { |
591 | loff_t off; | |
592 | size_t len; | |
593 | }; | |
594 | ||
| | /* init_req_fn for READ: copy the requested byte range into the message. */ |
595 | static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, | |
596 | void *private) | |
597 | { | |
598 | struct cachefiles_object *object = req->object; | |
599 | struct cachefiles_read *load = (void *)req->msg.data; | |
600 | struct cachefiles_read_ctx *read_ctx = private; | |
9032b6e8 | 601 | |
9032b6e8 JX |
602 | load->off = read_ctx->off; |
603 | load->len = read_ctx->len; | |
1519670e | 604 | trace_cachefiles_ondemand_read(object, &req->msg, load); |
9032b6e8 JX |
605 | return 0; |
606 | } | |
607 | ||
c8383054 JX |
| | /* Send an OPEN request for @object unless it is already open (or not on-demand). */ |
608 | int cachefiles_ondemand_init_object(struct cachefiles_object *object) |
609 | { | |
610 | struct fscache_cookie *cookie = object->cookie; | |
611 | struct fscache_volume *volume = object->volume->vcookie; | |
612 | size_t volume_key_size, cookie_key_size, data_len; | |
613 | ||
c3d6569a DH |
614 | if (!object->ondemand) |
615 | return 0; | |
616 | ||
c8383054 JX |
617 | /* |
618 | * CacheFiles will firstly check the cache file under the root cache | |
619 | * directory. If the coherency check failed, it will fallback to | |
620 | * creating a new tmpfile as the cache file. Reuse the previously | |
621 | * allocated object ID if any. | |
622 | */ | |
357a18d0 | 623 | if (cachefiles_ondemand_object_is_open(object)) |
c8383054 JX |
624 | return 0; |
625 | ||
626 | volume_key_size = volume->key[0] + 1; | |
627 | cookie_key_size = cookie->key_len; | |
628 | data_len = sizeof(struct cachefiles_open) + | |
629 | volume_key_size + cookie_key_size; | |
630 | ||
631 | return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN, | |
632 | data_len, cachefiles_ondemand_init_open_req, NULL); | |
633 | } | |
324b954a JX |
634 | |
| | /* Notify the daemon that @object is being withdrawn (best-effort CLOSE). */ |
635 | void cachefiles_ondemand_clean_object(struct cachefiles_object *object) | |
636 | { | |
637 | cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, | |
638 | cachefiles_ondemand_init_close_req, NULL); | |
639 | } | |
9032b6e8 | 640 | |
3c5ecfe1 JZ |
| | /* Allocate and initialise @object->ondemand when the cache is in on-demand mode. */ |
641 | int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, |
642 | struct cachefiles_volume *volume) | |
643 | { | |
644 | if (!cachefiles_in_ondemand_mode(volume->cache)) | |
645 | return 0; | |
646 | ||
647 | object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info), | |
648 | GFP_KERNEL); | |
649 | if (!object->ondemand) | |
650 | return -ENOMEM; | |
651 | ||
652 | object->ondemand->object = object; | |
0a790040 | 653 | spin_lock_init(&object->ondemand->lock); |
0a7e54c1 | 654 | INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker); |
3c5ecfe1 JZ |
655 | return 0; |
656 | } | |
657 | ||
| | /* Free @object->ondemand and clear the pointer to avoid dangling access. */ |
658 | void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object) | |
659 | { | |
660 | kfree(object->ondemand); | |
661 | object->ondemand = NULL; | |
662 | } | |
663 | ||
9032b6e8 JX |
| | /* Ask the daemon to fetch [pos, pos+len) into the cache; blocks until cread. */ |
664 | int cachefiles_ondemand_read(struct cachefiles_object *object, |
665 | loff_t pos, size_t len) | |
666 | { | |
667 | struct cachefiles_read_ctx read_ctx = {pos, len}; | |
668 | ||
669 | return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ, | |
670 | sizeof(struct cachefiles_read), | |
671 | cachefiles_ondemand_init_read_req, &read_ctx); | |
672 | }