nfs: rename members of nfs_pgio_data
fs/nfs/blocklayout/blocklayout.c (linux-2.6-block.git)
/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

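/* Reference counting for parallel_io: alloc_parallel() starts the refcount
 * at 1, bl_submit_bio() takes an extra reference for every bio it submits,
 * and each bio completion handler drops one via put_parallel().  The
 * read/write pagelist code drops the initial reference when it has finished
 * building bios, so pnfs_callback runs exactly once, after the last bio
 * completes.
 */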
static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

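/* Submit the current bio, if any, taking a parallel_io reference that the
 * bio's completion handler will drop.  Always returns NULL so the caller
 * can simply reassign its bio pointer and start building a fresh bio.
 */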
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

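/* Add (part of) a page to the current bio, allocating a new bio against the
 * extent's device if necessary.  The byte offset within the page is folded
 * into isect (offset >> SECTOR_SHIFT, i.e. 512-byte sectors).  If the page
 * does not fit in the current bio (the bio is full, or was sized smaller
 * under memory pressure), the bio is submitted and the add is retried on a
 * fresh one.
 */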
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
				  end_io, par, 0, PAGE_CACHE_SIZE);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (!err)
		bio_for_each_segment_all(bvec, bio, i)
			SetPageUptodate(bvec->bv_page);

	if (err) {
		struct nfs_pgio_data *rdata = par->data;
		struct nfs_pgio_header *header = rdata->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_pgio_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_pgio_data *rdata = data;

	rdata->task.tk_status = rdata->header->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

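/* Read path: walk the request's pages, look up the block extent covering
 * each page, and either zero the page in place (holes with no copy-on-write
 * extent to read from) or queue it on a bio against the extent's device.
 * Direct I/O requests may start and end mid-page, so pg_offset/pg_len track
 * the sub-page range; buffered reads always cover whole pages.  Any failure
 * after alloc_parallel() succeeds is reported through header->pnfs_error
 * rather than by falling back to the MDS.
 */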
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_data *rdata)
{
	struct nfs_pgio_header *header = rdata->header;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t bytes_left = rdata->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		rdata->page_array.npages, f_offset,
		(unsigned int)rdata->args.count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}

		if (is_dio) {
			pg_offset = f_offset & ~PAGE_CACHE_MASK;
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
		} else {
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		}

		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio,
						 rdata->page_array.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = header->inode->i_size - rdata->args.offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

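/* Record a just-written byte range so it can be reported to the server in a
 * later LAYOUTCOMMIT.  Only ranges inside PNFS_BLOCK_INVALID_DATA extents
 * need tracking; each one consumes a short extent previously reserved with
 * bl_push_one_short_extent() by the write path.
 */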
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}

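/* Completion for bios built from zeroing pages (bl_find_get_zeroing_page):
 * unlike bl_end_io_write below, it must also end writeback on and release
 * the page-cache pages that were taken solely for zero-filling.
 */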
static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		/* This is the zeroing page we added */
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}

	if (unlikely(err)) {
		struct nfs_pgio_data *data = par->data;
		struct nfs_pgio_header *header = data->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_data *data = par->data;
	struct nfs_pgio_header *header = data->header;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_pgio_data, task);
	if (likely(!wdata->header->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_pgio_data *wdata = data;

	if (unlikely(wdata->header->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
					num_se);
	}

	wdata->task.tk_status = wdata->header->pnfs_error;
	wdata->writeverf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

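/* Synchronous read-modify-write helpers for writes that are not sector
 * aligned: bl_do_readpage_sync() reads one sector-aligned range through a
 * temporary "shadow" page and copies the requested bytes into the real page;
 * bl_read_partial_page_sync() uses it to fill in the parts of a page that
 * the caller is not about to overwrite (or zeroes them when no extent backs
 * the page).
 */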
static void
bl_read_single_end_io(struct bio *bio, int error)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;

	/* Only one page in bvec */
	unlock_page(page);
}

static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
		    unsigned int offset, unsigned int len)
{
	struct bio *bio;
	struct page *shadow_page;
	sector_t isect;
	char *kaddr, *kshadow_addr;
	int ret = 0;

	dprintk("%s: offset %u len %u\n", __func__, offset, len);

	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (shadow_page == NULL)
		return -ENOMEM;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL)
		return -ENOMEM;

	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
		(offset / SECTOR_SIZE);

	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = bl_read_single_end_io;

	lock_page(shadow_page);
	if (bio_add_page(bio, shadow_page,
			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
		unlock_page(shadow_page);
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_on_page_locked(shadow_page);
	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
		ret = -EIO;
	} else {
		kaddr = kmap_atomic(page);
		kshadow_addr = kmap_atomic(shadow_page);
		memcpy(kaddr + offset, kshadow_addr + offset, len);
		kunmap_atomic(kshadow_addr);
		kunmap_atomic(kaddr);
	}
	__free_page(shadow_page);
	bio_put(bio);

	return ret;
}

static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
			  unsigned int dirty_offset, unsigned int dirty_len,
			  bool full_page)
{
	int ret = 0;
	unsigned int start, end;

	if (full_page) {
		start = 0;
		end = PAGE_CACHE_SIZE;
	} else {
		start = round_down(dirty_offset, SECTOR_SIZE);
		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
	}

	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
	if (!be) {
		zero_user_segments(page, start, dirty_offset,
				   dirty_offset + dirty_len, end);
		if (start == 0 && end == PAGE_CACHE_SIZE &&
		    trylock_page(page)) {
			SetPageUptodate(page);
			unlock_page(page);
		}
		return ret;
	}

	if (start != dirty_offset)
		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

	if (!ret && (dirty_offset + dirty_len < end))
		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
					  end - dirty_offset - dirty_len);

	return ret;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page marked being writeback.
 * Return ERR_PTR on error, NULL to indicate skip this page and page itself
 * to indicate write out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			 struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;
	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Other will write this out
	 * PageWriteback: Other is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page, read it in or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);

	return page;
}

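/* Write path, in three phases.  For PNFS_BLOCK_INVALID_DATA extents the
 * surrounding block must end up fully initialized, so (1) any
 * not-yet-initialized pages of the block in front of the first written page
 * are zeroed and written, (2) the pages of the request itself are written,
 * doing sector read-modify-write where the request is not sector aligned,
 * and (3) the remainder of the last block is zeroed via the same
 * fill_invalid_ext loop.  Direct I/O that is not aligned to the server's
 * block size, or the lack of a writable extent, sends the request back to
 * the MDS (PNFS_NOT_ATTEMPTED).
 */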
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
{
	struct nfs_pgio_header *header = wdata->header;
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	if (header->dreq != NULL &&
	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}
	/* At this point, wdata->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (;npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(header->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				header->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(header->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, &cow_read);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					header->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}

		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
		pg_offset = offset & ~PAGE_CACHE_MASK;
		if (pg_offset + count > PAGE_CACHE_SIZE)
			pg_len = PAGE_CACHE_SIZE - pg_offset;
		else
			pg_len = count;

		saved_len = pg_len;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_read_partial_page_sync(pages[i], cow_read,
							pg_offset, pg_len, true);
			if (ret) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			/* Expand to full page write */
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
			    (pg_len & (SECTOR_SIZE - 1))) {
			/* ahh, nasty case. We have to do sync full sector
			 * read-modify-write cycles.
			 */
			unsigned int saved_offset = pg_offset;
			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
							pg_len, false);
			pg_offset = round_down(pg_offset, SECTOR_SIZE);
			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
				 - pg_offset;
		}


		bio = do_add_page_to_bio(bio, wdata->page_array.npages - i,
					 WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += saved_len;
		count -= saved_len;
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = wdata->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;
	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

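/* Mount-time setup: walk the server's GETDEVICELIST results, fetch and
 * decode each volume with GETDEVICEINFO (nfs4_blk_get_deviceinfo /
 * nfs4_blk_decode_device), and hang the resulting pnfs_block_dev list off
 * server->pnfs_ld_data as the block_mount_id for this mount.
 */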
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

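/* Page I/O coalescing hooks.  Direct I/O requests that are not sector
 * aligned (reads) or page aligned (writes) are redirected to the MDS
 * instead of being coalesced into a pNFS pagelist for this driver.
 */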
static bool
is_aligned_req(struct nfs_page *req, unsigned int alignment)
{
	return IS_ALIGNED(req->wb_offset, alignment) &&
	       IS_ALIGNED(req->wb_bytes, alignment);
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		nfs_pageio_reset_read_mds(pgio);
	else
		pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
	} else {
		u64 wb_size;
		if (pgio->pg_dreq == NULL)
			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
						      req->wb_index);
		else
			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pnfs_generic_pg_init_write(pgio, req, wb_size);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.owner = THIS_MODULE,
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.encode_layoutcommit = bl_encode_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.clear_layoutdriver = bl_clear_layoutdriver,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
};

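/* rpc_pipefs plumbing for the "blocklayout" upcall pipe.  The pipe is
 * created per network namespace and (re)linked under rpc_pipefs on mount
 * and umount events.  The device-decoding code (outside this file) uses it
 * to consult a userspace helper when mapping volume signatures to local
 * block devices; the exact helper is an assumption here, not shown in this
 * file.
 */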
static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
					    struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);