fs: Remove aop flags parameter from cont_write_begin()
[linux-block.git] / fs / orangefs / inode.c
// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS inode operations.
 */

#include <linux/blkdev.h>
#include <linux/fileattr.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

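/*
 * Buffered writes are tracked with a struct orangefs_write_range attached as
 * page/folio private data.  It records the byte range that was written plus
 * the fsuid/fsgid of the writer, so writeback can replay the write to the
 * server with the right extent and credentials.  The helpers below
 * (writepage, writepages, write_begin/end, invalidate_folio, launder_folio)
 * all cooperate around that structure.
 */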
static int orangefs_writepage_locked(struct page *page,
	struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t len, wlen;
	ssize_t ret;
	loff_t off;

	set_page_writeback(page);

	len = i_size_read(inode);
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		WARN_ON(wr->pos >= len);
		off = wr->pos;
		if (off + wr->len > len)
			wlen = len - off;
		else
			wlen = wr->len;
	} else {
		WARN_ON(1);
		off = page_offset(page);
		if (off + PAGE_SIZE > len)
			wlen = len - off;
		else
			wlen = PAGE_SIZE;
	}
	/* Should've been handled in orangefs_invalidate_folio. */
	WARN_ON(off == len || off + wlen > len);

	bv.bv_page = page;
	bv.bv_len = wlen;
	bv.bv_offset = off % PAGE_SIZE;
	WARN_ON(wlen == 0);
	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
	    len, wr, NULL, NULL);
	if (ret < 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
	} else {
		ret = 0;
	}
	kfree(detach_page_private(page));
	return ret;
}

static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;
	ret = orangefs_writepage_locked(page, wbc);
	unlock_page(page);
	end_page_writeback(page);
	return ret;
}

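/*
 * ->writepages support: orangefs_writepages_callback() batches contiguous
 * dirty pages that share a writer (same fsuid/fsgid) into one
 * orangefs_writepages descriptor, and orangefs_writepages_work() submits the
 * batch as a single bvec-based write.  A batch is flushed when the
 * credentials change, the ranges stop being contiguous, or maxpages (the
 * bufmap size) is reached.
 */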
struct orangefs_writepages {
	loff_t off;
	size_t len;
	kuid_t uid;
	kgid_t gid;
	int maxpages;
	int npages;
	struct page **pages;
	struct bio_vec *bv;
};

static int orangefs_writepages_work(struct orangefs_writepages *ow,
	struct writeback_control *wbc)
{
	struct inode *inode = ow->pages[0]->mapping->host;
	struct orangefs_write_range *wrp, wr;
	struct iov_iter iter;
	ssize_t ret;
	size_t len;
	loff_t off;
	int i;

	len = i_size_read(inode);

	for (i = 0; i < ow->npages; i++) {
		set_page_writeback(ow->pages[i]);
		ow->bv[i].bv_page = ow->pages[i];
		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
		    ow->off + ow->len) -
		    max(ow->off, page_offset(ow->pages[i]));
		if (i == 0)
			ow->bv[i].bv_offset = ow->off -
			    page_offset(ow->pages[i]);
		else
			ow->bv[i].bv_offset = 0;
	}
	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);

	WARN_ON(ow->off >= len);
	if (ow->off + ow->len > len)
		ow->len = len - ow->off;

	off = ow->off;
	wr.uid = ow->uid;
	wr.gid = ow->gid;
	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
	    0, &wr, NULL, NULL);
	if (ret < 0) {
		for (i = 0; i < ow->npages; i++) {
			SetPageError(ow->pages[i]);
			mapping_set_error(ow->pages[i]->mapping, ret);
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	} else {
		ret = 0;
		for (i = 0; i < ow->npages; i++) {
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	}
	return ret;
}

static int orangefs_writepages_callback(struct page *page,
	struct writeback_control *wbc, void *data)
{
	struct orangefs_writepages *ow = data;
	struct orangefs_write_range *wr;
	int ret;

	if (!PagePrivate(page)) {
		unlock_page(page);
		/* It's not private so there's nothing to write, right? */
		printk("writepages_callback not private!\n");
		BUG();
		return 0;
	}
	wr = (struct orangefs_write_range *)page_private(page);

	ret = -1;
	if (ow->npages == 0) {
		ow->off = wr->pos;
		ow->len = wr->len;
		ow->uid = wr->uid;
		ow->gid = wr->gid;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
	if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
		orangefs_writepages_work(ow, wbc);
		ow->npages = 0;
		ret = -1;
		goto done;
	}
	if (ow->off + ow->len == wr->pos) {
		ow->len += wr->len;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
done:
	if (ret == -1) {
		if (ow->npages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
		ret = orangefs_writepage_locked(page, wbc);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		end_page_writeback(page);
	} else {
		if (ow->npages == ow->maxpages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
	}
	return ret;
}

static int orangefs_writepages(struct address_space *mapping,
	struct writeback_control *wbc)
{
	struct orangefs_writepages *ow;
	struct blk_plug plug;
	int ret;
	ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
	if (!ow)
		return -ENOMEM;
	ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
	if (!ow->pages) {
		kfree(ow);
		return -ENOMEM;
	}
	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
	if (!ow->bv) {
		kfree(ow->pages);
		kfree(ow);
		return -ENOMEM;
	}
	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
	if (ow->npages)
		ret = orangefs_writepages_work(ow, wbc);
	blk_finish_plug(&plug);
	kfree(ow->pages);
	kfree(ow->bv);
	kfree(ow);
	return ret;
}

static int orangefs_launder_folio(struct folio *);

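/*
 * Readahead reads straight into the page cache through an xarray-backed
 * iov_iter.  If plenty of the file remains, the window is expanded (up to
 * 4 MiB here) before issuing one large read to the server.
 */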
static void orangefs_readahead(struct readahead_control *rac)
{
	loff_t offset;
	struct iov_iter iter;
	struct inode *inode = rac->mapping->host;
	struct xarray *i_pages;
	struct page *page;
	loff_t new_start = readahead_pos(rac);
	int ret;
	size_t new_len = 0;

	loff_t bytes_remaining = inode->i_size - readahead_pos(rac);
	loff_t pages_remaining = bytes_remaining / PAGE_SIZE;

	if (pages_remaining >= 1024)
		new_len = 4194304;
	else if (pages_remaining > readahead_count(rac))
		new_len = bytes_remaining;

	if (new_len)
		readahead_expand(rac, new_start, new_len);

	offset = readahead_pos(rac);
	i_pages = &rac->mapping->i_pages;

	iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac));

	/* read in the pages. */
	if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode,
	    &offset, &iter, readahead_length(rac),
	    inode->i_size, NULL, NULL, rac->file)) < 0)
		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s: wait_for_direct_io failed. \n", __func__);
	else
		ret = 0;

	/* clean up. */
	while ((page = readahead_page(rac))) {
		page_endio(page, false, ret);
		put_page(page);
	}
}

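/*
 * Read a single page synchronously.  A dirty folio is laundered first so the
 * read does not clobber pending write data; anything the server does not
 * return is zero-filled before the page is marked uptodate.
 */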
static int orangefs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset into this page */

	if (folio_test_dirty(folio))
		orangefs_launder_folio(folio);

	off = page_offset(page);
	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;
	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
	    PAGE_SIZE, inode->i_size, NULL, NULL, file);
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (ret < 0) {
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);
	return ret;
}

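/*
 * write_begin/write_end: extend an existing write range when the new write
 * is contiguous and by the same fsuid/fsgid, otherwise launder the folio and
 * attach a fresh range.  write_end updates i_size and dirties the page.
 */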
static int orangefs_write_begin(struct file *file,
	struct address_space *mapping,
	loff_t pos, unsigned len, unsigned flags, struct page **pagep,
	void **fsdata)
{
	struct orangefs_write_range *wr;
	struct folio *folio;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;
	folio = page_folio(page);

	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
		 * Should be impossible. If it happens, launder the page
		 * since we don't know what's dirty. This will WARN in
		 * orangefs_writepage_locked.
		 */
		ret = orangefs_launder_folio(folio);
		if (ret)
			return ret;
	}
	if (folio_test_private(folio)) {
		struct orangefs_write_range *wr;
		wr = folio_get_private(folio);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;
			goto okay;
		} else {
			ret = orangefs_launder_folio(folio);
			if (ret)
				return ret;
		}
	}

	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:
	return 0;
}

static int orangefs_write_end(struct file *file, struct address_space *mapping,
	loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
}

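/*
 * Trim or drop the tracked write range when part of the folio is
 * invalidated.  The cases below cover full invalidation, an invalidate
 * region that swallows, truncates, or punches a hole in the write range, and
 * non-overlapping ranges; if the range was merely shrunk, the folio is
 * laundered so the surviving bytes still reach the server.
 */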
static void orangefs_invalidate_folio(struct folio *folio,
	size_t offset, size_t length)
{
	struct orangefs_write_range *wr = folio_get_private(folio);

	if (offset == 0 && length == PAGE_SIZE) {
		kfree(folio_detach_private(folio));
		return;
	/* write range entirely within invalidate range (or equal) */
	} else if (folio_pos(folio) + offset <= wr->pos &&
	    wr->pos + wr->len <= folio_pos(folio) + offset + length) {
		kfree(folio_detach_private(folio));
		/* XXX is this right? only caller in fs */
		folio_cancel_dirty(folio);
		return;
	/* invalidate range chops off end of write range */
	} else if (wr->pos < folio_pos(folio) + offset &&
	    wr->pos + wr->len <= folio_pos(folio) + offset + length &&
	    folio_pos(folio) + offset < wr->pos + wr->len) {
		size_t x;
		x = wr->pos + wr->len - (folio_pos(folio) + offset);
		WARN_ON(x > wr->len);
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range chops off beginning of write range */
	} else if (folio_pos(folio) + offset <= wr->pos &&
	    folio_pos(folio) + offset + length < wr->pos + wr->len &&
	    wr->pos < folio_pos(folio) + offset + length) {
		size_t x;
		x = folio_pos(folio) + offset + length - wr->pos;
		WARN_ON(x > wr->len);
		wr->pos += x;
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range entirely within write range (punch hole) */
	} else if (wr->pos < folio_pos(folio) + offset &&
	    folio_pos(folio) + offset + length < wr->pos + wr->len) {
		/* XXX what do we do here... should not WARN_ON */
		WARN_ON(1);
		/* punch hole */
		/*
		 * should we just ignore this and write it out anyway?
		 * it hardly makes sense
		 */
		return;
	/* non-overlapping ranges */
	} else {
		/* WARN if they do overlap */
		if (!((folio_pos(folio) + offset + length <= wr->pos) ^
		    (wr->pos + wr->len <= folio_pos(folio) + offset))) {
			WARN_ON(1);
			printk("invalidate range offset %llu length %zu\n",
			    folio_pos(folio) + offset, length);
			printk("write range offset %llu length %zu\n",
			    wr->pos, wr->len);
		}
		return;
	}

	/*
	 * Above there are returns where wr is freed or where we WARN.
	 * Thus the following runs if wr was modified above.
	 */

	orangefs_launder_folio(folio);
}

static int orangefs_releasepage(struct page *page, gfp_t foo)
{
	return !PagePrivate(page);
}

static void orangefs_freepage(struct page *page)
{
	kfree(detach_page_private(page));
}

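/*
 * Synchronously write back a locked dirty folio (WB_SYNC_ALL) through
 * orangefs_writepage_locked(); used above whenever a folio must be clean
 * before it can be reused for a different writer or range.
 */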
static int orangefs_launder_folio(struct folio *folio)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};
	folio_wait_writeback(folio);
	if (folio_clear_dirty_for_io(folio)) {
		r = orangefs_writepage_locked(&folio->page, &wbc);
		folio_end_writeback(folio);
	}
	return r;
}

static ssize_t orangefs_direct_IO(struct kiocb *iocb,
	struct iov_iter *iter)
{
	/*
	 * Comment from original do_readv_writev:
	 * Common entry point for read/write/readv/writev
	 * This function will dispatch it to either the direct I/O
	 * or buffered I/O path depending on the mount options and/or
	 * augmented/extended metadata attached to the file.
	 * Note: File extended attributes override any mount options.
	 */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
	    ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
	loff_t *offset = &pos;
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;
	int i = 0;

	gossip_debug(GOSSIP_FILE_DEBUG,
		"%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
		__func__,
		handle,
		(int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s(%pU): proceeding with offset : %llu, "
			"size %d\n",
			__func__,
			handle,
			llu(*offset),
			(int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;
		i++;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s(%pU): size of each_count(%d)\n",
			__func__,
			handle,
			(int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s(%pU): BEFORE wait_for_io: offset is %d\n",
			__func__,
			handle,
			(int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
			each_count, 0, NULL, NULL, file);
		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s(%pU): return from wait_for_io:%d\n",
			__func__,
			handle,
			(int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			"%s(%pU): AFTER wait_for_io: offset is %d\n",
			__func__,
			handle,
			(int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /* end while */

out:
	if (total_count > 0)
		ret = total_count;
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			file_update_time(file);
			if (*offset > i_size_read(inode))
				i_size_write(inode, *offset);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		"%s(%pU): Value(%d) returned.\n",
		__func__,
		handle,
		(int)ret);

	return ret;
}

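/*
 * All of the paths above funnel into wait_for_direct_io(), which in this
 * driver hands the request to the user-space OrangeFS client through the
 * shared buffer map; there is no block device underneath these operations.
 */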
/** ORANGEFS2 implementation of address space operations */
static const struct address_space_operations orangefs_address_operations = {
	.writepage = orangefs_writepage,
	.readahead = orangefs_readahead,
	.readpage = orangefs_readpage,
	.writepages = orangefs_writepages,
	.dirty_folio = filemap_dirty_folio,
	.write_begin = orangefs_write_begin,
	.write_end = orangefs_write_end,
	.invalidate_folio = orangefs_invalidate_folio,
	.releasepage = orangefs_releasepage,
	.freepage = orangefs_freepage,
	.launder_folio = orangefs_launder_folio,
	.direct_IO = orangefs_direct_IO,
};

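/*
 * mmap write fault: make the folio writable, tagging it with the faulting
 * task's fsuid/fsgid via a full-page write range, and cooperate with
 * filesystem freezing by re-dirtying the folio and waiting for it to become
 * stable.
 */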
vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	unsigned long *bitlock = &orangefs_inode->bitlock;
	vm_fault_t ret;
	struct orangefs_write_range *wr;

	sb_start_pagefault(inode->i_sb);

	if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
		ret = VM_FAULT_RETRY;
		goto out;
	}

	folio_lock(folio);
	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
		 * Should be impossible. If it happens, launder the folio
		 * since we don't know what's dirty. This will WARN in
		 * orangefs_writepage_locked.
		 */
		if (orangefs_launder_folio(folio)) {
			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
			goto out;
		}
	}
	if (folio_test_private(folio)) {
		wr = folio_get_private(folio);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->pos = page_offset(vmf->page);
			wr->len = PAGE_SIZE;
			goto okay;
		} else {
			if (orangefs_launder_folio(folio)) {
				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
				goto out;
			}
		}
	}
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr) {
		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
		goto out;
	}
	wr->pos = page_offset(vmf->page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:

	file_update_time(vmf->vma->vm_file);
	if (folio->mapping != inode->i_mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * We mark the folio dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty folio and writeprotect it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

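/*
 * Truncate: refresh the size from the server, shrink or extend the page
 * cache locally (truncate_setsize done in a different order), then send an
 * ORANGEFS_VFS_OP_TRUNCATE upcall.  C/MTIME are flagged for update only if
 * the size actually changed.
 */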
static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;
	loff_t orig_size;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
		     __func__,
		     get_khandle_from_ino(inode),
		     &orangefs_inode->refn.khandle,
		     orangefs_inode->refn.fs_id,
		     iattr->ia_size);

	/* Ensure that we have an up-to-date size, so we know if it changed. */
	ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
	if (ret == -ESTALE)
		ret = -EIO;
	if (ret) {
		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
		    __func__, ret);
		return ret;
	}
	orig_size = i_size_read(inode);

	/* This is truncate_setsize in a different order. */
	truncate_pagecache(inode, iattr->ia_size);
	i_size_write(inode, iattr->ia_size);
	if (iattr->ia_size > orig_size)
		pagecache_isize_extended(inode, orig_size, iattr->ia_size);

	new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
	if (!new_op)
		return -ENOMEM;

	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;

	ret = service_operation(new_op,
		__func__,
		get_interruptible_flag(inode));

	/*
	 * the truncate has no downcall members to retrieve, but
	 * the status value tells us if it went through ok or not
	 */
	gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);

	op_release(new_op);

	if (ret != 0)
		return ret;

	if (orig_size != i_size_read(inode))
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;

	return ret;
}

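/*
 * Apply attribute changes locally and remember which attributes are pending
 * (attr_valid plus the owning fsuid/fsgid); if another user's change is
 * already pending, the inode is written out first.  The server sees the new
 * attributes when the inode is later written back.
 */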
int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
{
	int ret;

	if (iattr->ia_valid & ATTR_MODE) {
		if (iattr->ia_mode & (S_ISVTX)) {
			if (is_root_handle(inode)) {
				/*
				 * allow sticky bit to be set on root (since
				 * it shows up that way by default anyhow),
				 * but don't show it to the server
				 */
				iattr->ia_mode -= S_ISVTX;
			} else {
				gossip_debug(GOSSIP_UTILS_DEBUG,
					     "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
				ret = -EINVAL;
				goto out;
			}
		}
		if (iattr->ia_mode & (S_ISUID)) {
			gossip_debug(GOSSIP_UTILS_DEBUG,
				     "Attempting to set setuid bit (not supported); returning EINVAL.\n");
			ret = -EINVAL;
			goto out;
		}
	}

	if (iattr->ia_valid & ATTR_SIZE) {
		ret = orangefs_setattr_size(inode, iattr);
		if (ret)
			goto out;
	}

again:
	spin_lock(&inode->i_lock);
	if (ORANGEFS_I(inode)->attr_valid) {
		if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
		    gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
			ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		} else {
			spin_unlock(&inode->i_lock);
			write_inode_now(inode, 1);
			goto again;
		}
	} else {
		ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		ORANGEFS_I(inode)->attr_uid = current_fsuid();
		ORANGEFS_I(inode)->attr_gid = current_fsgid();
	}
	setattr_copy(&init_user_ns, inode, iattr);
	spin_unlock(&inode->i_lock);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		/* change mod on a file that has ACLs */
		ret = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);

	ret = 0;
out:
	return ret;
}

/*
 * Change attributes of an object referenced by dentry.
 */
int orangefs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		     struct iattr *iattr)
{
	int ret;
	gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n",
		     dentry);
	ret = setattr_prepare(&init_user_ns, dentry, iattr);
	if (ret)
		goto out;
	ret = __orangefs_setattr(d_inode(dentry), iattr);
	sync_inode_metadata(d_inode(dentry), 1);
out:
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
		     ret);
	return ret;
}

/*
 * Obtain attributes of an object given a dentry
 */
int orangefs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		     struct kstat *stat, u32 request_mask, unsigned int flags)
{
	int ret;
	struct inode *inode = path->dentry->d_inode;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "orangefs_getattr: called on %pd mask %u\n",
		     path->dentry, request_mask);

	ret = orangefs_inode_getattr(inode,
	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
	if (ret == 0) {
		generic_fillattr(&init_user_ns, inode, stat);

		/* override block size reported to stat */
		if (!(request_mask & STATX_SIZE))
			stat->result_mask &= ~STATX_SIZE;

		generic_fill_statx_attr(inode, stat);
	}
	return ret;
}

int orangefs_permission(struct user_namespace *mnt_userns,
			struct inode *inode, int mask)
{
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);

	/* Make sure the permission (and other common attrs) are up to date. */
	ret = orangefs_inode_getattr(inode, 0);
	if (ret < 0)
		return ret;

	return generic_permission(&init_user_ns, inode, mask);
}

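/*
 * Timestamp updates go through __orangefs_setattr() as well, so
 * atime/ctime/mtime changes are queued for the server like any other
 * attribute change.
 */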
int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
	struct iattr iattr;
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
	    get_khandle_from_ino(inode));
	generic_update_time(inode, time, flags);
	memset(&iattr, 0, sizeof iattr);
	if (flags & S_ATIME)
		iattr.ia_valid |= ATTR_ATIME;
	if (flags & S_CTIME)
		iattr.ia_valid |= ATTR_CTIME;
	if (flags & S_MTIME)
		iattr.ia_valid |= ATTR_MTIME;
	return __orangefs_setattr(inode, &iattr);
}

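/*
 * File attribute flag (FS_IOC_GETFLAGS/SETFLAGS) support: the
 * immutable/append/noatime flags are stored in the "user.pvfs2.meta_hint"
 * extended attribute on the server rather than in the inode itself.
 */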
static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	u64 val = 0;
	int ret;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
		     dentry);

	ret = orangefs_inode_getxattr(d_inode(dentry),
				      "user.pvfs2.meta_hint",
				      &val, sizeof(val));
	if (ret < 0 && ret != -ENODATA)
		return ret;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);

	fileattr_fill_flags(fa, val);
	return 0;
}

static int orangefs_fileattr_set(struct user_namespace *mnt_userns,
				 struct dentry *dentry, struct fileattr *fa)
{
	u64 val = 0;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
		     dentry);
	/*
	 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode is
	 * turned on for a file. The user is not allowed to turn on this bit,
	 * but the bit is present if the user first gets the flags and then
	 * updates the flags with some new settings. So, we ignore it in the
	 * following edit. bligon.
	 */
	if (fileattr_has_fsx(fa) ||
	    (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL | ORANGEFS_MIRROR_FL))) {
		gossip_err("%s: only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n",
			   __func__);
		return -EOPNOTSUPP;
	}
	val = fa->flags;
	gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);
	return orangefs_inode_setxattr(d_inode(dentry),
				       "user.pvfs2.meta_hint",
				       &val, sizeof(val), 0);
}

/* ORANGEFS2 implementation of VFS inode operations for files */
static const struct inode_operations orangefs_file_inode_operations = {
	.get_acl = orangefs_get_acl,
	.set_acl = orangefs_set_acl,
	.setattr = orangefs_setattr,
	.getattr = orangefs_getattr,
	.listxattr = orangefs_listxattr,
	.permission = orangefs_permission,
	.update_time = orangefs_update_time,
	.fileattr_get = orangefs_fileattr_get,
	.fileattr_set = orangefs_fileattr_set,
};

static int orangefs_init_iops(struct inode *inode)
{
	inode->i_mapping->a_ops = &orangefs_address_operations;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &orangefs_file_inode_operations;
		inode->i_fop = &orangefs_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &orangefs_symlink_inode_operations;
		break;
	case S_IFDIR:
		inode->i_op = &orangefs_dir_inode_operations;
		inode->i_fop = &orangefs_dir_operations;
		break;
	default:
		gossip_debug(GOSSIP_INODE_DEBUG,
			     "%s: unsupported mode\n",
			     __func__);
		return -EINVAL;
	}

	return 0;
}

1010
1011/*
95f5f88f
MM
1012 * Given an ORANGEFS object identifier (fsid, handle), convert it into
1013 * a ino_t type that will be used as a hash-index from where the handle will
5db11c21
MM
1014 * be searched for in the VFS hash table of inodes.
1015 */
8bb8aefd 1016static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
5db11c21
MM
1017{
1018 if (!ref)
1019 return 0;
8bb8aefd 1020 return orangefs_khandle_to_ino(&(ref->khandle));
5db11c21
MM
1021}
1022
1023/*
1024 * Called to set up an inode from iget5_locked.
1025 */
8bb8aefd 1026static int orangefs_set_inode(struct inode *inode, void *data)
5db11c21 1027{
8bb8aefd 1028 struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
a4c680a0
MB
1029 ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
1030 ORANGEFS_I(inode)->refn.khandle = ref->khandle;
afd9fb2a 1031 ORANGEFS_I(inode)->attr_valid = 0;
fc2e2e9c 1032 hash_init(ORANGEFS_I(inode)->xattr_cache);
8f04e1be
MB
1033 ORANGEFS_I(inode)->mapping_time = jiffies - 1;
1034 ORANGEFS_I(inode)->bitlock = 0;
5db11c21
MM
1035 return 0;
1036}
1037
1038/*
1039 * Called to determine if handles match.
1040 */
8bb8aefd 1041static int orangefs_test_inode(struct inode *inode, void *data)
5db11c21 1042{
8bb8aefd
YL
1043 struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
1044 struct orangefs_inode_s *orangefs_inode = NULL;
5db11c21 1045
8bb8aefd 1046 orangefs_inode = ORANGEFS_I(inode);
95f5f88f
MM
1047 /* test handles and fs_ids... */
1048 return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
1049 &(ref->khandle)) &&
1050 orangefs_inode->refn.fs_id == ref->fs_id);
5db11c21
MM
1051}
1052
1053/*
8bb8aefd 1054 * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
5db11c21
MM
1055 * file handle.
1056 *
1057 * @sb: the file system super block instance.
95f5f88f 1058 * @ref: The ORANGEFS object for which we are trying to locate an inode.
5db11c21 1059 */
95f5f88f
MM
1060struct inode *orangefs_iget(struct super_block *sb,
1061 struct orangefs_object_kref *ref)
5db11c21
MM
1062{
1063 struct inode *inode = NULL;
1064 unsigned long hash;
1065 int error;
1066
8bb8aefd 1067 hash = orangefs_handle_hash(ref);
95f5f88f
MM
1068 inode = iget5_locked(sb,
1069 hash,
1070 orangefs_test_inode,
1071 orangefs_set_inode,
1072 ref);
b5d72cdc
MM
1073
1074 if (!inode)
1075 return ERR_PTR(-ENOMEM);
1076
1077 if (!(inode->i_state & I_NEW))
5db11c21
MM
1078 return inode;
1079
8b60785c 1080 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
5db11c21
MM
1081 if (error) {
1082 iget_failed(inode);
1083 return ERR_PTR(error);
1084 }
1085
1086 inode->i_ino = hash; /* needed for stat etc */
8bb8aefd 1087 orangefs_init_iops(inode);
5db11c21
MM
1088 unlock_new_inode(inode);
1089
1090 gossip_debug(GOSSIP_INODE_DEBUG,
1091 "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
1092 &ref->khandle,
1093 ref->fs_id,
1094 hash,
1095 inode->i_ino);
1096
1097 return inode;
1098}
1099
1100/*
1101 * Allocate an inode for a newly created file and insert it into the inode hash.
1102 */
8bb8aefd
YL
1103struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
1104 int mode, dev_t dev, struct orangefs_object_kref *ref)
5db11c21 1105{
8bb8aefd 1106 unsigned long hash = orangefs_handle_hash(ref);
5db11c21
MM
1107 struct inode *inode;
1108 int error;
1109
1110 gossip_debug(GOSSIP_INODE_DEBUG,
5253487e
MM
1111 "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
1112 __func__,
5db11c21
MM
1113 sb,
1114 MAJOR(dev),
1115 MINOR(dev),
1116 mode);
1117
1118 inode = new_inode(sb);
1119 if (!inode)
56249998 1120 return ERR_PTR(-ENOMEM);
5db11c21 1121
8bb8aefd 1122 orangefs_set_inode(inode, ref);
5db11c21
MM
1123 inode->i_ino = hash; /* needed for stat etc */
1124
8b60785c 1125 error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
5db11c21
MM
1126 if (error)
1127 goto out_iput;
1128
8bb8aefd 1129 orangefs_init_iops(inode);
5db11c21
MM
1130 inode->i_rdev = dev;
1131
8bb8aefd 1132 error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
5db11c21
MM
1133 if (error < 0)
1134 goto out_iput;
1135
1136 gossip_debug(GOSSIP_INODE_DEBUG,
1137 "Initializing ACL's for inode %pU\n",
1138 get_khandle_from_ino(inode));
8bb8aefd 1139 orangefs_init_acl(inode, dir);
5db11c21
MM
1140 return inode;
1141
1142out_iput:
1143 iput(inode);
1144 return ERR_PTR(error);
1145}