/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>          /* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static bool is_hole(struct pnfs_block_extent *be)
{
        switch (be->be_state) {
        case PNFS_BLOCK_NONE_DATA:
                return true;
        case PNFS_BLOCK_INVALID_DATA:
                return be->be_tag ? false : true;
        default:
                return false;
        }
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data);
        void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

static struct bio *
bl_submit_bio(struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        bio_op(bio) == READ ? "read" : "write",
                        bio->bi_iter.bi_size,
                        (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(bio);
        }
        return NULL;
}

static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
        return offset >= map->start && offset < map->start + map->len;
}

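/*
 * Map @isect through the extent and the cached device map, then add the
 * page to @bio at the resulting disk offset.  *len is trimmed to what the
 * device mapping allows; the current bio is submitted and a new one
 * allocated whenever the page cannot be added to it.
 */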
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, enum req_op op, sector_t isect,
                struct page *page, struct pnfs_block_dev_map *map,
                struct pnfs_block_extent *be, bio_end_io_t end_io,
                struct parallel_io *par, unsigned int offset, int *len)
{
        struct pnfs_block_dev *dev =
                container_of(be->be_device, struct pnfs_block_dev, node);
        u64 disk_addr, end;

        dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
                npg, (__force u32)op, (unsigned long long)isect, offset, *len);

        /* translate to device offset */
        isect += be->be_v_offset;
        isect -= be->be_f_offset;

        /* translate to physical disk offset */
        disk_addr = (u64)isect << SECTOR_SHIFT;
        if (!offset_in_map(disk_addr, map)) {
                if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
                        return ERR_PTR(-EIO);
                bio = bl_submit_bio(bio);
        }
        disk_addr += map->disk_offset;
        disk_addr -= map->start;

        /* limit length to what the device mapping allows */
        end = disk_addr + *len;
        if (end >= map->start + map->len)
                *len = map->start + map->len - disk_addr;

retry:
        if (!bio) {
                bio = bio_alloc(map->bdev, bio_max_segs(npg), op, GFP_NOIO);
                bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        if (bio_add_page(bio, page, *len, offset) < *len) {
                bio = bl_submit_bio(bio);
                goto retry;
        }
        return bio;
}

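/*
 * Walk the extents covering the failed I/O range and mark each extent's
 * device id as unavailable.
 */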
static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
{
        struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
        size_t bytes_left = header->args.count;
        sector_t isect, extent_length = 0;
        struct pnfs_block_extent be;

        isect = header->args.offset >> SECTOR_SHIFT;
        bytes_left += header->args.offset - (isect << SECTOR_SHIFT);

        while (bytes_left > 0) {
                if (!ext_tree_lookup(bl, isect, &be, rw))
                        return;
                extent_length = be.be_length - (isect - be.be_f_offset);
                nfs4_mark_deviceid_unavailable(be.be_device);
                isect += extent_length;
                if (bytes_left > extent_length << SECTOR_SHIFT)
                        bytes_left -= extent_length << SECTOR_SHIFT;
                else
                        bytes_left = 0;
        }
}

static void bl_end_io_read(struct bio *bio)
{
        struct parallel_io *par = bio->bi_private;

        if (bio->bi_status) {
                struct nfs_pgio_header *header = par->data;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
                bl_mark_devices_unavailable(header, false);
        }

        bio_put(bio);
        put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_pgio_header *hdr;
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        hdr = container_of(task, struct nfs_pgio_header, task);
        pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
        struct nfs_pgio_header *hdr = data;

        hdr->task.tk_status = hdr->pnfs_error;
        INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
        schedule_work(&hdr->task.u.tk_work);
}

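/*
 * Read path: build and submit bios for each extent covering the request.
 * Pages backed by hole extents are zero-filled without touching the device.
 */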
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
        struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
        struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
        struct bio *bio = NULL;
        struct pnfs_block_extent be;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = header->args.offset;
        size_t bytes_left = header->args.count;
        unsigned int pg_offset = header->args.pgbase, pg_len;
        struct page **pages = header->args.pages;
        int pg_index = header->args.pgbase >> PAGE_SHIFT;
        const bool is_dio = (header->dreq != NULL);
        struct blk_plug plug;
        int i;

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
                header->page_array.npages, f_offset,
                (unsigned int)header->args.count);

        par = alloc_parallel(header);
        if (!par)
                return PNFS_NOT_ATTEMPTED;
        par->pnfs_callback = bl_end_par_io_read;

        blk_start_plug(&plug);

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
                        bio = bl_submit_bio(bio);

                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, false)) {
                                header->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be.be_length - (isect - be.be_f_offset);
                }

                if (is_dio) {
                        if (pg_offset + bytes_left > PAGE_SIZE)
                                pg_len = PAGE_SIZE - pg_offset;
                        else
                                pg_len = bytes_left;
                } else {
                        BUG_ON(pg_offset != 0);
                        pg_len = PAGE_SIZE;
                }

                if (is_hole(&be)) {
                        bio = bl_submit_bio(bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], pg_offset, pg_len);

                        /* invalidate map */
                        map.start = NFS4_MAX_UINT64;
                } else {
                        bio = do_add_page_to_bio(bio,
                                                 header->page_array.npages - i,
                                                 REQ_OP_READ,
                                                 isect, pages[i], &map, &be,
                                                 bl_end_io_read, par,
                                                 pg_offset, &pg_len);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += (pg_len >> SECTOR_SHIFT);
                extent_length -= (pg_len >> SECTOR_SHIFT);
                f_offset += pg_len;
                bytes_left -= pg_len;
                pg_offset = 0;
        }
        if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
                header->res.eof = 1;
                header->res.count = header->inode->i_size - header->args.offset;
        } else {
                header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
        }
out:
        bl_submit_bio(bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio)
{
        struct parallel_io *par = bio->bi_private;
        struct nfs_pgio_header *header = par->data;

        if (bio->bi_status) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
                bl_mark_devices_unavailable(header, true);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write;
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
        struct nfs_pgio_header *hdr =
                container_of(task, struct nfs_pgio_header, task);

        dprintk("%s enter\n", __func__);

        if (likely(!hdr->pnfs_error)) {
                struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
                u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
                u64 end = (hdr->args.offset + hdr->args.count +
                        PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
                u64 lwb = hdr->args.offset + hdr->args.count;

                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
                                      (end - start) >> SECTOR_SHIFT, lwb);
        }

        pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
        struct nfs_pgio_header *hdr = data;

        hdr->task.tk_status = hdr->pnfs_error;
        hdr->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
        schedule_work(&hdr->task.u.tk_work);
}

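/*
 * Write path: whole pages are always written out.  Completion is handled
 * by bl_end_par_io_write(), which schedules bl_write_cleanup() to mark the
 * written ranges in the extent tree.
 */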
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
        struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
        struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
        struct bio *bio = NULL;
        struct pnfs_block_extent be;
        sector_t isect, extent_length = 0;
        struct parallel_io *par = NULL;
        loff_t offset = header->args.offset;
        size_t count = header->args.count;
        struct page **pages = header->args.pages;
        int pg_index = header->args.pgbase >> PAGE_SHIFT;
        unsigned int pg_len;
        struct blk_plug plug;
        int i;

        dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

        /* At this point, header->page_array is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error, set pnfs_error
         * to have it redone using nfs.
         */
        par = alloc_parallel(header);
        if (!par)
                return PNFS_NOT_ATTEMPTED;
        par->pnfs_callback = bl_end_par_io_write;

        blk_start_plug(&plug);

        /* we always write out the whole page */
        offset = offset & (loff_t)PAGE_MASK;
        isect = offset >> SECTOR_SHIFT;

        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
                        bio = bl_submit_bio(bio);
                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, true)) {
                                header->pnfs_error = -EINVAL;
                                goto out;
                        }

                        extent_length = be.be_length - (isect - be.be_f_offset);
                }

                pg_len = PAGE_SIZE;
                bio = do_add_page_to_bio(bio, header->page_array.npages - i,
                                         REQ_OP_WRITE, isect, pages[i], &map,
                                         &be, bl_end_io_write, par, 0, &pg_len);
                if (IS_ERR(bio)) {
                        header->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }

                offset += pg_len;
                count -= pg_len;
                isect += (pg_len >> SECTOR_SHIFT);
                extent_length -= (pg_len >> SECTOR_SHIFT);
        }

        header->res.count = header->args.count;
out:
        bl_submit_bio(bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        int err;

        dprintk("%s enter\n", __func__);

        err = ext_tree_remove(bl, true, 0, LLONG_MAX);
        WARN_ON(err);

        kfree_rcu(bl, bl_layout.plh_rcu);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
                gfp_t gfp_flags, bool is_scsi_layout)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;

        bl->bl_ext_rw = RB_ROOT;
        bl->bl_ext_ro = RB_ROOT;
        spin_lock_init(&bl->bl_ext_lock);

        bl->bl_scsi_layout = is_scsi_layout;
        return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                gfp_t gfp_flags)
{
        return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
                gfp_t gfp_flags)
{
        return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
        u32 mode;       /* R or RW */
        u64 start;      /* Expected start of next non-COW extent */
        u64 inval;      /* Start of INVAL coverage */
        u64 cowread;    /* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
                         struct layout_verification *lv)
{
        if (lv->mode == IOMODE_READ) {
                if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                    be->be_state == PNFS_BLOCK_INVALID_DATA)
                        return -EIO;
                if (be->be_f_offset != lv->start)
                        return -EIO;
                lv->start += be->be_length;
                return 0;
        }
        /* lv->mode == IOMODE_RW */
        if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
                if (be->be_f_offset != lv->start)
                        return -EIO;
                if (lv->cowread > lv->start)
                        return -EIO;
                lv->start += be->be_length;
                lv->inval = lv->start;
                return 0;
        } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (be->be_f_offset != lv->start)
                        return -EIO;
                lv->start += be->be_length;
                return 0;
        } else if (be->be_state == PNFS_BLOCK_READ_DATA) {
                if (be->be_f_offset > lv->start)
                        return -EIO;
                if (be->be_f_offset < lv->inval)
                        return -EIO;
                if (be->be_f_offset < lv->cowread)
                        return -EIO;
                /* It looks like you might want to min this with lv->start,
                 * but you really don't.
                 */
                lv->inval = lv->inval + be->be_length;
                lv->cowread = be->be_f_offset + be->be_length;
                return 0;
        } else
                return -EIO;
}

static int decode_sector_number(__be32 **rp, sector_t *sp)
{
        uint64_t s;

        *rp = xdr_decode_hyper(*rp, &s);
        if (s & 0x1ff) {
                printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
                return -1;
        }
        *sp = s >> SECTOR_SHIFT;
        return 0;
}

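/*
 * Look up the device id node for @id, fetching it from the server if
 * necessary, and make sure the underlying block device is registered.
 * Unavailable devices are re-fetched once their retry timeout has expired;
 * otherwise an ERR_PTR is returned.
 */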
static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
                const struct nfs4_deviceid *id, const struct cred *cred,
                gfp_t gfp_mask)
{
        struct nfs4_deviceid_node *node;
        int err = -ENODEV;

retry:
        node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
        if (!node)
                return ERR_PTR(-ENODEV);

        /*
         * Devices that are marked unavailable are left in the cache with a
         * timeout to avoid sending GETDEVINFO after every LAYOUTGET, or
         * constantly attempting to register the device.  Once marked as
         * unavailable they must be deleted and never reused.
         */
        if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
                unsigned long end = jiffies;
                unsigned long start = end - PNFS_DEVICE_RETRY_TIMEOUT;

                if (!time_in_range(node->timestamp_unavailable, start, end)) {
                        /* Uncork subsequent GETDEVINFO operations for this device */
                        nfs4_delete_deviceid(node->ld, node->nfs_client, id);
                        goto retry;
                }
                goto out_put;
        }

        if (!bl_register_dev(container_of(node, struct pnfs_block_dev, node))) {
                /*
                 * If we cannot register, treat this device as transient:
                 * Make a negative cache entry for the device
                 */
                nfs4_mark_deviceid_unavailable(node);
                goto out_put;
        }

        return node;

out_put:
        nfs4_put_deviceid_node(node);
        return ERR_PTR(err);
}

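/*
 * Decode one extent from the layout XDR stream, resolve its device id and
 * verify it against the layout constraints before queueing it on the
 * temporary @extents list.
 */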
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
                struct layout_verification *lv, struct list_head *extents,
                gfp_t gfp_mask)
{
        struct pnfs_block_extent *be;
        struct nfs4_deviceid id;
        int error;
        __be32 *p;

        p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
        if (!p)
                return -EIO;

        be = kzalloc(sizeof(*be), GFP_NOFS);
        if (!be)
                return -ENOMEM;

        memcpy(&id, p, NFS4_DEVICEID4_SIZE);
        p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

        be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
                                             lo->plh_lc_cred, gfp_mask);
        if (IS_ERR(be->be_device)) {
                error = PTR_ERR(be->be_device);
                goto out_free_be;
        }

        /*
         * The next three values are read in as bytes, but stored in the
         * extent structure in 512-byte granularity.
         */
        error = -EIO;
        if (decode_sector_number(&p, &be->be_f_offset) < 0)
                goto out_put_deviceid;
        if (decode_sector_number(&p, &be->be_length) < 0)
                goto out_put_deviceid;
        if (decode_sector_number(&p, &be->be_v_offset) < 0)
                goto out_put_deviceid;
        be->be_state = be32_to_cpup(p++);

        error = verify_extent(be, lv);
        if (error) {
                dprintk("%s: extent verification failed\n", __func__);
                goto out_put_deviceid;
        }

        list_add_tail(&be->be_list, extents);
        return 0;

out_put_deviceid:
        nfs4_put_deviceid_node(be->be_device);
out_free_be:
        kfree(be);
        return error;
}

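/*
 * Decode an entire LAYOUTGET result into extents and insert them into the
 * layout's extent trees.  Extents are staged on a local list first so that
 * a decode error can unwind without leaving partial state behind.
 */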
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
                gfp_t gfp_mask)
{
        struct layout_verification lv = {
                .mode = lgr->range.iomode,
                .start = lgr->range.offset >> SECTOR_SHIFT,
                .inval = lgr->range.offset >> SECTOR_SHIFT,
                .cowread = lgr->range.offset >> SECTOR_SHIFT,
        };
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        struct pnfs_layout_segment *lseg;
        struct xdr_buf buf;
        struct xdr_stream xdr;
        struct page *scratch;
        int status, i;
        uint32_t count;
        __be32 *p;
        LIST_HEAD(extents);

        dprintk("---> %s\n", __func__);

        lseg = kzalloc(sizeof(*lseg), gfp_mask);
        if (!lseg)
                return ERR_PTR(-ENOMEM);

        status = -ENOMEM;
        scratch = alloc_page(gfp_mask);
        if (!scratch)
                goto out;

        xdr_init_decode_pages(&xdr, &buf,
                        lgr->layoutp->pages, lgr->layoutp->len);
        xdr_set_scratch_page(&xdr, scratch);

        status = -EIO;
        p = xdr_inline_decode(&xdr, 4);
        if (unlikely(!p))
                goto out_free_scratch;

        count = be32_to_cpup(p++);
        dprintk("%s: number of extents %d\n", __func__, count);

        /*
         * Decode individual extents, putting them in temporary staging area
         * until whole layout is decoded to make error recovery easier.
         */
        for (i = 0; i < count; i++) {
                status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
                if (status)
                        goto process_extents;
        }

        if (lgr->range.offset + lgr->range.length !=
                        lv.start << SECTOR_SHIFT) {
                dprintk("%s Final length mismatch\n", __func__);
                status = -EIO;
                goto process_extents;
        }

        if (lv.start < lv.cowread) {
                dprintk("%s Final uncovered COW extent\n", __func__);
                status = -EIO;
        }

process_extents:
        while (!list_empty(&extents)) {
                struct pnfs_block_extent *be =
                        list_first_entry(&extents, struct pnfs_block_extent,
                                         be_list);
                list_del(&be->be_list);

                if (!status)
                        status = ext_tree_insert(bl, be);

                if (status) {
                        nfs4_put_deviceid_node(be->be_device);
                        kfree(be);
                }
        }

out_free_scratch:
        __free_page(scratch);
out:
        dprintk("%s returns %d\n", __func__, status);
        switch (status) {
        case -ENODEV:
                /* Our extent block devices are unavailable */
                set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
                fallthrough;
        case 0:
                return lseg;
        default:
                kfree(lseg);
                return ERR_PTR(status);
        }
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_range *range)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        sector_t offset = range->offset >> SECTOR_SHIFT, end;

        if (range->offset % 8) {
                dprintk("%s: offset %lld not block size aligned\n",
                        __func__, range->offset);
                return;
        }

        if (range->length != NFS4_MAX_UINT64) {
                if (range->length % 8) {
                        dprintk("%s: length %lld not block size aligned\n",
                                __func__, range->length);
                        return;
                }

                end = offset + (range->length >> SECTOR_SHIFT);
        } else {
                end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
        }

        ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
        return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        if (server->pnfs_blksize > PAGE_SIZE) {
                printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
                        __func__, server->pnfs_blksize);
                return -EINVAL;
        }

        return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
                struct nfs_page *req, unsigned int alignment, bool is_write)
{
        /*
         * Always accept buffered writes, higher layers take care of the
         * right alignment.
         */
        if (pgio->pg_dreq == NULL)
                return true;

        if (!IS_ALIGNED(req->wb_offset, alignment))
                return false;

        if (IS_ALIGNED(req->wb_bytes, alignment))
                return true;

        if (is_write &&
            (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
                /*
                 * If the write goes up to the inode size, just write
                 * the full page.  Data past the inode size is
                 * guaranteed to be zeroed by the higher level client
                 * code, and this behaviour is mandated by RFC 5663
                 * section 2.3.2.
                 */
                return true;
        }

        return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
                nfs_pageio_reset_read_mds(pgio);
                return;
        }

        pnfs_generic_pg_init_read(pgio, req);

        if (pgio->pg_lseg &&
            test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
                pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
                pnfs_set_lo_fail(pgio->pg_lseg);
                nfs_pageio_reset_read_mds(pgio);
        }
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
                return 0;
        return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t end;

        /* Optimize common case that writes from 0 to end of file */
        end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (end != inode->i_mapping->nrpages) {
                rcu_read_lock();
                end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
                rcu_read_unlock();
        }

        if (!end)
                return i_size_read(inode) - (idx << PAGE_SHIFT);
        else
                return (end - idx) << PAGE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        u64 wb_size;

        if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
                nfs_pageio_reset_write_mds(pgio);
                return;
        }

        if (pgio->pg_dreq == NULL)
                wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index);
        else
                wb_size = nfs_dreq_bytes_left(pgio->pg_dreq, req_offset(req));

        pnfs_generic_pg_init_write(pgio, req, wb_size);

        if (pgio->pg_lseg &&
            test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {

                pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
                pnfs_set_lo_fail(pgio->pg_lseg);
                nfs_pageio_reset_write_mds(pgio);
        }
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
                return 0;
        return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = bl_pg_init_read,
        .pg_test = bl_pg_test_read,
        .pg_doio = pnfs_generic_pg_readpages,
        .pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = bl_pg_init_write,
        .pg_test = bl_pg_test_write,
        .pg_doio = pnfs_generic_pg_writepages,
        .pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id = LAYOUT_BLOCK_VOLUME,
        .name = "LAYOUT_BLOCK_VOLUME",
        .owner = THIS_MODULE,
        .flags = PNFS_LAYOUTRET_ON_SETATTR |
                 PNFS_LAYOUTRET_ON_ERROR |
                 PNFS_READ_WHOLE_PAGE,
        .read_pagelist = bl_read_pagelist,
        .write_pagelist = bl_write_pagelist,
        .alloc_layout_hdr = bl_alloc_layout_hdr,
        .free_layout_hdr = bl_free_layout_hdr,
        .alloc_lseg = bl_alloc_lseg,
        .free_lseg = bl_free_lseg,
        .return_range = bl_return_range,
        .prepare_layoutcommit = bl_prepare_layoutcommit,
        .cleanup_layoutcommit = bl_cleanup_layoutcommit,
        .set_layoutdriver = bl_set_layoutdriver,
        .alloc_deviceid_node = bl_alloc_deviceid_node,
        .free_deviceid_node = bl_free_deviceid_node,
        .pg_read_ops = &bl_pg_read_ops,
        .pg_write_ops = &bl_pg_write_ops,
        .sync = pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
        .id = LAYOUT_SCSI,
        .name = "LAYOUT_SCSI",
        .owner = THIS_MODULE,
        .flags = PNFS_LAYOUTRET_ON_SETATTR |
                 PNFS_LAYOUTRET_ON_ERROR |
                 PNFS_READ_WHOLE_PAGE,
        .read_pagelist = bl_read_pagelist,
        .write_pagelist = bl_write_pagelist,
        .alloc_layout_hdr = sl_alloc_layout_hdr,
        .free_layout_hdr = bl_free_layout_hdr,
        .alloc_lseg = bl_alloc_lseg,
        .free_lseg = bl_free_lseg,
        .return_range = bl_return_range,
        .prepare_layoutcommit = bl_prepare_layoutcommit,
        .cleanup_layoutcommit = bl_cleanup_layoutcommit,
        .set_layoutdriver = bl_set_layoutdriver,
        .alloc_deviceid_node = bl_alloc_deviceid_node,
        .free_deviceid_node = bl_free_deviceid_node,
        .pg_read_ops = &bl_pg_read_ops,
        .pg_write_ops = &bl_pg_write_ops,
        .sync = pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = bl_init_pipefs();
        if (ret)
                goto out;

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out_cleanup_pipe;

        ret = pnfs_register_layoutdriver(&scsilayout_type);
        if (ret)
                goto out_unregister_block;
        return 0;

out_unregister_block:
        pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
        bl_cleanup_pipefs();
out:
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
                __func__);

        pnfs_unregister_layoutdriver(&scsilayout_type);
        pnfs_unregister_layoutdriver(&blocklayout_type);
        bl_cleanup_pipefs();
}

MODULE_ALIAS("nfs-layouttype4-3");
MODULE_ALIAS("nfs-layouttype4-5");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);