pnfsblock: fix partial page buffer write
[linux-2.6-block.git] fs/nfs/blocklayout/blocklayout.c
1/*
2 * linux/fs/nfs/blocklayout/blocklayout.c
3 *
4 * Module for the NFSv4.1 pNFS block layout driver.
5 *
6 * Copyright (c) 2006 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Andy Adamson <andros@citi.umich.edu>
10 * Fred Isaman <iisaman@umich.edu>
11 *
12 * permission is granted to use, copy, create derivative works and
13 * redistribute this software and such derivative works for any purpose,
14 * so long as the name of the university of michigan is not used in
15 * any advertising or publicity pertaining to the use or distribution
16 * of this software without specific, written prior authorization. if
17 * the above copyright notice or any other identification of the
18 * university of michigan is included in any copy of any portion of
19 * this software, then the disclaimer below must also be included.
20 *
21 * this software is provided as is, without representation from the
22 * university of michigan as to its fitness for any purpose, and without
23 * warranty by the university of michigan of any kind, either express
24 * or implied, including without limitation the implied warranties of
25 * merchantability and fitness for a particular purpose. the regents
26 * of the university of michigan shall not be liable for any damages,
27 * including special, indirect, incidental, or consequential damages,
28 * with respect to any claim arising out or in connection with the use
29 * of the software, even if it has been or is hereafter advised of the
30 * possibility of such damages.
31 */
32
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/mount.h>
36#include <linux/namei.h>
37#include <linux/bio.h> /* struct bio */
38#include <linux/buffer_head.h> /* various write calls */
39#include <linux/prefetch.h>
40
41#include "../pnfs.h"
42#include "../internal.h"
43#include "blocklayout.h"
44
45#define NFSDBG_FACILITY NFSDBG_PNFS_LD
46
47MODULE_LICENSE("GPL");
48MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
49MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
50
51static void print_page(struct page *page)
52{
53 dprintk("PRINTPAGE page %p\n", page);
54 dprintk(" PagePrivate %d\n", PagePrivate(page));
55 dprintk(" PageUptodate %d\n", PageUptodate(page));
56 dprintk(" PageError %d\n", PageError(page));
57 dprintk(" PageDirty %d\n", PageDirty(page));
58 dprintk(" PageReferenced %d\n", PageReferenced(page));
59 dprintk(" PageLocked %d\n", PageLocked(page));
60 dprintk(" PageWriteback %d\n", PageWriteback(page));
61 dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
62 dprintk("\n");
63}
64
65/* Given the be associated with isect, determine if page data needs to be
66 * initialized.
67 */
68static int is_hole(struct pnfs_block_extent *be, sector_t isect)
69{
70 if (be->be_state == PNFS_BLOCK_NONE_DATA)
71 return 1;
72 else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
73 return 0;
74 else
75 return !bl_is_sector_init(be->be_inval, isect);
76}
77
78/* Given the be associated with isect, determine if page data can be
79 * written to disk.
80 */
81static int is_writable(struct pnfs_block_extent *be, sector_t isect)
82{
83 return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
84 be->be_state == PNFS_BLOCK_INVALID_DATA);
85}
86
87/* The data we are handed might be spread across several bios. We need
88 * to track when the last one is finished.
89 */
90struct parallel_io {
91 struct kref refcnt;
92 void (*pnfs_callback) (void *data, int num_se);
93 void *data;
94 int bse_count;
95};
96
97static inline struct parallel_io *alloc_parallel(void *data)
98{
99 struct parallel_io *rv;
100
101 rv = kmalloc(sizeof(*rv), GFP_NOFS);
102 if (rv) {
103 rv->data = data;
104 kref_init(&rv->refcnt);
105 rv->bse_count = 0;
106 }
107 return rv;
108}
109
110static inline void get_parallel(struct parallel_io *p)
111{
112 kref_get(&p->refcnt);
113}
114
115static void destroy_parallel(struct kref *kref)
116{
117 struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
118
119 dprintk("%s enter\n", __func__);
120 p->pnfs_callback(p->data, p->bse_count);
121 kfree(p);
122}
123
124static inline void put_parallel(struct parallel_io *p)
125{
126 kref_put(&p->refcnt, destroy_parallel);
127}
128
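/* Submit the current bio, if any, and return NULL so the caller can start
 * a fresh one. A parallel_io reference is taken for each bio actually
 * submitted; the matching put happens in the bio's end_io handler.
 */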
129static struct bio *
130bl_submit_bio(int rw, struct bio *bio)
131{
132 if (bio) {
133 get_parallel(bio->bi_private);
134 dprintk("%s submitting %s bio %u@%llu\n", __func__,
135 rw == READ ? "read" : "write",
136 bio->bi_size, (unsigned long long)bio->bi_sector);
137 submit_bio(rw, bio);
138 }
139 return NULL;
140}
141
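/* Allocate a bio big enough for up to npg pages (capped at BIO_MAX_PAGES),
 * retrying with fewer pages if the caller is in memory-reclaim context
 * (PF_MEMALLOC), and point it at the volume offset the extent maps isect to.
 */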
142static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
143 struct pnfs_block_extent *be,
144 void (*end_io)(struct bio *, int err),
145 struct parallel_io *par)
146{
147 struct bio *bio;
148
149 npg = min(npg, BIO_MAX_PAGES);
150 bio = bio_alloc(GFP_NOIO, npg);
151 if (!bio && (current->flags & PF_MEMALLOC)) {
152 while (!bio && (npg /= 2))
153 bio = bio_alloc(GFP_NOIO, npg);
154 }
155
156 if (bio) {
157 bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
158 bio->bi_bdev = be->be_mdev;
159 bio->bi_end_io = end_io;
160 bio->bi_private = par;
161 }
162 return bio;
163}
164
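/* Append (part of) a page to the bio, allocating one first if needed. If
 * the page does not fit in the current bio, that bio is submitted and a
 * new one is started.
 */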
165static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
166 sector_t isect, struct page *page,
167 struct pnfs_block_extent *be,
168 void (*end_io)(struct bio *, int err),
169 struct parallel_io *par,
170 unsigned int offset, int len)
171{
172 isect = isect + (offset >> SECTOR_SHIFT);
173 dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
174 npg, rw, (unsigned long long)isect, offset, len);
175retry:
176 if (!bio) {
177 bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
178 if (!bio)
179 return ERR_PTR(-ENOMEM);
180 }
181 if (bio_add_page(bio, page, len, offset) < len) {
182 bio = bl_submit_bio(rw, bio);
183 goto retry;
184 }
185 return bio;
186}
187
188static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
189 sector_t isect, struct page *page,
190 struct pnfs_block_extent *be,
191 void (*end_io)(struct bio *, int err),
192 struct parallel_io *par)
193{
194 return do_add_page_to_bio(bio, npg, rw, isect, page, be,
195 end_io, par, 0, PAGE_CACHE_SIZE);
196}
197
198/* This is basically copied from mpage_end_io_read */
199static void bl_end_io_read(struct bio *bio, int err)
200{
201 struct parallel_io *par = bio->bi_private;
202 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
203 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
204
205 do {
206 struct page *page = bvec->bv_page;
207
208 if (--bvec >= bio->bi_io_vec)
209 prefetchw(&bvec->bv_page->flags);
210 if (uptodate)
211 SetPageUptodate(page);
212 } while (bvec >= bio->bi_io_vec);
213 if (!uptodate) {
214 struct nfs_read_data *rdata = par->data;
215 struct nfs_pgio_header *header = rdata->header;
216
217 if (!header->pnfs_error)
218 header->pnfs_error = -EIO;
219 pnfs_set_lo_fail(header->lseg);
220 }
221 bio_put(bio);
222 put_parallel(par);
223}
224
225static void bl_read_cleanup(struct work_struct *work)
226{
227 struct rpc_task *task;
228 struct nfs_read_data *rdata;
229 dprintk("%s enter\n", __func__);
230 task = container_of(work, struct rpc_task, u.tk_work);
231 rdata = container_of(task, struct nfs_read_data, task);
232 pnfs_ld_read_done(rdata);
233}
234
235static void
236bl_end_par_io_read(void *data, int unused)
237{
238 struct nfs_read_data *rdata = data;
239
240 rdata->task.tk_status = rdata->header->pnfs_error;
241 INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
242 schedule_work(&rdata->task.u.tk_work);
243}
244
245static enum pnfs_try_status
246bl_read_pagelist(struct nfs_read_data *rdata)
247{
248 struct nfs_pgio_header *header = rdata->header;
249 int i, hole;
250 struct bio *bio = NULL;
251 struct pnfs_block_extent *be = NULL, *cow_read = NULL;
252 sector_t isect, extent_length = 0;
253 struct parallel_io *par;
254 loff_t f_offset = rdata->args.offset;
255 struct page **pages = rdata->args.pages;
256 int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
257
258 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
259 rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
260
261 par = alloc_parallel(rdata);
262 if (!par)
263 goto use_mds;
264 par->pnfs_callback = bl_end_par_io_read;
265 /* At this point, we can no longer jump to use_mds */
266
267 isect = (sector_t) (f_offset >> SECTOR_SHIFT);
268 /* Code assumes extents are page-aligned */
269 for (i = pg_index; i < rdata->pages.npages; i++) {
270 if (!extent_length) {
271 /* We've used up the previous extent */
272 bl_put_extent(be);
273 bl_put_extent(cow_read);
274 bio = bl_submit_bio(READ, bio);
275 /* Get the next one */
276 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
277 isect, &cow_read);
278 if (!be) {
279 header->pnfs_error = -EIO;
280 goto out;
281 }
282 extent_length = be->be_length -
283 (isect - be->be_f_offset);
284 if (cow_read) {
285 sector_t cow_length = cow_read->be_length -
286 (isect - cow_read->be_f_offset);
287 extent_length = min(extent_length, cow_length);
288 }
289 }
290 hole = is_hole(be, isect);
291 if (hole && !cow_read) {
292 bio = bl_submit_bio(READ, bio);
293 /* Fill hole w/ zeroes w/o accessing device */
294 dprintk("%s Zeroing page for hole\n", __func__);
295 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
296 print_page(pages[i]);
297 SetPageUptodate(pages[i]);
298 } else {
299 struct pnfs_block_extent *be_read;
300
301 be_read = (hole && cow_read) ? cow_read : be;
302 bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
303 READ,
304 isect, pages[i], be_read,
305 bl_end_io_read, par);
306 if (IS_ERR(bio)) {
307 header->pnfs_error = PTR_ERR(bio);
308 bio = NULL;
309 goto out;
310 }
311 }
312 isect += PAGE_CACHE_SECTORS;
313 extent_length -= PAGE_CACHE_SECTORS;
314 }
315 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
316 rdata->res.eof = 1;
317 rdata->res.count = header->inode->i_size - f_offset;
318 } else {
319 rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
320 }
321out:
322 bl_put_extent(be);
323 bl_put_extent(cow_read);
324 bl_submit_bio(READ, bio);
325 put_parallel(par);
326 return PNFS_ATTEMPTED;
327
328 use_mds:
329 dprintk("Giving up and using normal NFS\n");
330 return PNFS_NOT_ATTEMPTED;
331}
332
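/* Walk the extents covering the just-written range; for INVALID extents,
 * hand a preallocated short extent to bl_mark_for_commit so the range is
 * reported in the next LAYOUTCOMMIT.
 */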
333static void mark_extents_written(struct pnfs_block_layout *bl,
334 __u64 offset, __u32 count)
335{
336 sector_t isect, end;
337 struct pnfs_block_extent *be;
338 struct pnfs_block_short_extent *se;
339
340 dprintk("%s(%llu, %u)\n", __func__, offset, count);
341 if (count == 0)
342 return;
343 isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
344 end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
345 end >>= SECTOR_SHIFT;
346 while (isect < end) {
347 sector_t len;
348 be = bl_find_get_extent(bl, isect, NULL);
349 BUG_ON(!be); /* FIXME */
350 len = min(end, be->be_f_offset + be->be_length) - isect;
351 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
352 se = bl_pop_one_short_extent(be->be_inval);
353 BUG_ON(!se);
354 bl_mark_for_commit(be, isect, len, se);
355 }
356 isect += len;
357 bl_put_extent(be);
358 }
359}
360
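/* Completion handler for bios built from zeroing pages: end writeback on
 * each page and drop the reference taken in bl_find_get_zeroing_page.
 */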
361static void bl_end_io_write_zero(struct bio *bio, int err)
362{
363 struct parallel_io *par = bio->bi_private;
364 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
365 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
366
367 do {
368 struct page *page = bvec->bv_page;
369
370 if (--bvec >= bio->bi_io_vec)
371 prefetchw(&bvec->bv_page->flags);
372 /* This is the zeroing page we added */
373 end_page_writeback(page);
374 page_cache_release(page);
375 } while (bvec >= bio->bi_io_vec);
376
377 if (unlikely(!uptodate)) {
378 struct nfs_write_data *data = par->data;
379 struct nfs_pgio_header *header = data->header;
380
381 if (!header->pnfs_error)
382 header->pnfs_error = -EIO;
383 pnfs_set_lo_fail(header->lseg);
384 }
385 bio_put(bio);
386 put_parallel(par);
387}
388
389static void bl_end_io_write(struct bio *bio, int err)
390{
391 struct parallel_io *par = bio->bi_private;
392 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
393 struct nfs_write_data *data = par->data;
394 struct nfs_pgio_header *header = data->header;
395
396 if (!uptodate) {
397 if (!header->pnfs_error)
398 header->pnfs_error = -EIO;
399 pnfs_set_lo_fail(header->lseg);
400 }
401 bio_put(bio);
402 put_parallel(par);
403}
404
405/* Function scheduled for call during bl_end_par_io_write,
406 * it marks sectors as written and extends the commitlist.
407 */
408static void bl_write_cleanup(struct work_struct *work)
409{
410 struct rpc_task *task;
411 struct nfs_write_data *wdata;
412 dprintk("%s enter\n", __func__);
413 task = container_of(work, struct rpc_task, u.tk_work);
414 wdata = container_of(task, struct nfs_write_data, task);
415 if (likely(!wdata->header->pnfs_error)) {
416 /* Marks for LAYOUTCOMMIT */
417 mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
418 wdata->args.offset, wdata->args.count);
419 }
420 pnfs_ld_write_done(wdata);
421}
422
423/* Called when last of bios associated with a bl_write_pagelist call finishes */
424static void bl_end_par_io_write(void *data, int num_se)
425{
426 struct nfs_write_data *wdata = data;
427
428 if (unlikely(wdata->header->pnfs_error)) {
429 bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
430 num_se);
431 }
432
433 wdata->task.tk_status = wdata->header->pnfs_error;
434 wdata->verf.committed = NFS_FILE_SYNC;
435 INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
436 schedule_work(&wdata->task.u.tk_work);
437}
438
439/* FIXME STUB - mark intersection of layout and page as bad, so it is not
440 * used again.
441 */
442static void mark_bad_read(void)
443{
444 return;
445}
446
447/*
448 * map_block: map a requested I/O block (isect) into an offset in the LVM
449 * block_device
450 */
451static void
452map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
453{
454 dprintk("%s enter be=%p\n", __func__, be);
455
456 set_buffer_mapped(bh);
457 bh->b_bdev = be->be_mdev;
458 bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
459 (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
460
461 dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
462 __func__, (unsigned long long)isect, (long)bh->b_blocknr,
463 bh->b_size);
464 return;
465}
466
467static void
468bl_read_single_end_io(struct bio *bio, int error)
469{
470 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
471 struct page *page = bvec->bv_page;
472
473 /* Only one page in bvec */
474 unlock_page(page);
475}
476
477static int
478bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
479 unsigned int offset, unsigned int len)
480{
481 struct bio *bio;
482 struct page *shadow_page;
483 sector_t isect;
484 char *kaddr, *kshadow_addr;
485 int ret = 0;
486
487 dprintk("%s: offset %u len %u\n", __func__, offset, len);
488
489 shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
490 if (shadow_page == NULL)
491 return -ENOMEM;
492
493 bio = bio_alloc(GFP_NOIO, 1);
494 if (bio == NULL)
495 return -ENOMEM;
496
497 isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
498 (offset / SECTOR_SIZE);
499
500 bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
501 bio->bi_bdev = be->be_mdev;
502 bio->bi_end_io = bl_read_single_end_io;
503
504 lock_page(shadow_page);
505 if (bio_add_page(bio, shadow_page,
506 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
507 unlock_page(shadow_page);
508 bio_put(bio);
509 return -EIO;
510 }
511
512 submit_bio(READ, bio);
513 wait_on_page_locked(shadow_page);
514 if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
515 ret = -EIO;
516 } else {
517 kaddr = kmap_atomic(page);
518 kshadow_addr = kmap_atomic(shadow_page);
519 memcpy(kaddr + offset, kshadow_addr + offset, len);
520 kunmap_atomic(kshadow_addr);
521 kunmap_atomic(kaddr);
522 }
523 __free_page(shadow_page);
524 bio_put(bio);
525
526 return ret;
527}
528
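/* Read-modify-write helper: bring the sectors surrounding the dirty byte
 * range up to date so the write can be done at sector granularity. With no
 * backing extent (be == NULL) the surrounding bytes are zeroed instead.
 */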
529static int
530bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
531 unsigned int dirty_offset, unsigned int dirty_len,
532 bool full_page)
533{
534 int ret = 0;
535 unsigned int start, end;
536
537 if (full_page) {
538 start = 0;
539 end = PAGE_CACHE_SIZE;
540 } else {
541 start = round_down(dirty_offset, SECTOR_SIZE);
542 end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
543 }
544
545 dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
546 if (!be) {
547 zero_user_segments(page, start, dirty_offset,
548 dirty_offset + dirty_len, end);
549 if (start == 0 && end == PAGE_CACHE_SIZE &&
550 trylock_page(page)) {
551 SetPageUptodate(page);
552 unlock_page(page);
553 }
554 return ret;
555 }
556
557 if (start != dirty_offset)
558 ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
559
560 if (!ret && (dirty_offset + dirty_len < end))
561 ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
562 end - dirty_offset - dirty_len);
563
564 return ret;
565}
566
567/* Given an unmapped page, zero it or read in page for COW, page is locked
568 * by caller.
569 */
570static int
571init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
572{
573 struct buffer_head *bh = NULL;
574 int ret = 0;
575 sector_t isect;
576
577 dprintk("%s enter, %p\n", __func__, page);
578 BUG_ON(PageUptodate(page));
579 if (!cow_read) {
580 zero_user_segment(page, 0, PAGE_SIZE);
581 SetPageUptodate(page);
582 goto cleanup;
583 }
584
585 bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
586 if (!bh) {
587 ret = -ENOMEM;
588 goto cleanup;
589 }
590
591 isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
592 map_block(bh, isect, cow_read);
593 if (!bh_uptodate_or_lock(bh))
594 ret = bh_submit_read(bh);
595 if (ret)
596 goto cleanup;
597 SetPageUptodate(page);
598
599cleanup:
600 if (bh)
601 free_buffer_head(bh);
602 if (ret) {
603 /* Need to mark layout with bad read...should now
604 * just use nfs4 for reads and writes.
605 */
606 mark_bad_read();
607 }
608 return ret;
609}
610
611/* Find or create a zeroing page marked as under writeback.
612 * Return ERR_PTR on error, NULL to indicate that this page should be
613 * skipped, or the page itself to indicate that it should be written out.
614 */
615static struct page *
616bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
617 struct pnfs_block_extent *cow_read)
618{
619 struct page *page;
620 int locked = 0;
621 page = find_get_page(inode->i_mapping, index);
622 if (page)
623 goto check_page;
624
625 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
626 if (unlikely(!page)) {
627 dprintk("%s oom\n", __func__);
628 return ERR_PTR(-ENOMEM);
629 }
630 locked = 1;
631
632check_page:
633 /* PageDirty: Other will write this out
634 * PageWriteback: Other is writing this out
635 * PageUptodate: It was read before
636 */
637 if (PageDirty(page) || PageWriteback(page)) {
638 print_page(page);
639 if (locked)
640 unlock_page(page);
641 page_cache_release(page);
642 return NULL;
643 }
644
645 if (!locked) {
646 lock_page(page);
647 locked = 1;
648 goto check_page;
649 }
650 if (!PageUptodate(page)) {
651 /* New page, read it in or zero it */
652 init_page_for_write(page, cow_read);
653 }
654 set_page_writeback(page);
655 unlock_page(page);
656
657 return page;
658}
659
660static enum pnfs_try_status
661bl_write_pagelist(struct nfs_write_data *wdata, int sync)
662{
663 struct nfs_pgio_header *header = wdata->header;
664 int i, ret, npg_zero, pg_index, last = 0;
665 struct bio *bio = NULL;
666 struct pnfs_block_extent *be = NULL, *cow_read = NULL;
667 sector_t isect, last_isect = 0, extent_length = 0;
668 struct parallel_io *par;
669 loff_t offset = wdata->args.offset;
670 size_t count = wdata->args.count;
671 unsigned int pg_offset, pg_len, saved_len;
672 struct page **pages = wdata->args.pages;
673 struct page *page;
674 pgoff_t index;
675 u64 temp;
676 int npg_per_block =
677 NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
678
679 dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
680 /* At this point, wdata->pages is a (sequential) list of nfs_pages.
681 * We want to write each, and if there is an error set pnfs_error
682 * to have it redone using nfs.
683 */
684 par = alloc_parallel(wdata);
685 if (!par)
686 goto out_mds;
687 par->pnfs_callback = bl_end_par_io_write;
688 /* At this point, have to be more careful with error handling */
689
690 isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
691 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
692 if (!be || !is_writable(be, isect)) {
693 dprintk("%s no matching extents!\n", __func__);
694 goto out_mds;
695 }
696
697 /* First page inside INVALID extent */
698 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
699 if (likely(!bl_push_one_short_extent(be->be_inval)))
700 par->bse_count++;
701 else
702 goto out_mds;
703 temp = offset >> PAGE_CACHE_SHIFT;
704 npg_zero = do_div(temp, npg_per_block);
705 isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
706 (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
707 extent_length = be->be_length - (isect - be->be_f_offset);
708
709fill_invalid_ext:
710 dprintk("%s need to zero %d pages\n", __func__, npg_zero);
711 for (;npg_zero > 0; npg_zero--) {
712 if (bl_is_sector_init(be->be_inval, isect)) {
713 dprintk("isect %llu already init\n",
714 (unsigned long long)isect);
715 goto next_page;
716 }
717 /* page ref released in bl_end_io_write_zero */
718 index = isect >> PAGE_CACHE_SECTOR_SHIFT;
719 dprintk("%s zero %dth page: index %lu isect %llu\n",
720 __func__, npg_zero, index,
721 (unsigned long long)isect);
722 page = bl_find_get_zeroing_page(header->inode, index,
723 cow_read);
724 if (unlikely(IS_ERR(page))) {
725 header->pnfs_error = PTR_ERR(page);
726 goto out;
727 } else if (page == NULL)
728 goto next_page;
729
730 ret = bl_mark_sectors_init(be->be_inval, isect,
731 PAGE_CACHE_SECTORS);
732 if (unlikely(ret)) {
733 dprintk("%s bl_mark_sectors_init fail %d\n",
734 __func__, ret);
735 end_page_writeback(page);
736 page_cache_release(page);
737 header->pnfs_error = ret;
738 goto out;
739 }
740 if (likely(!bl_push_one_short_extent(be->be_inval)))
741 par->bse_count++;
742 else {
743 end_page_writeback(page);
744 page_cache_release(page);
745 header->pnfs_error = -ENOMEM;
746 goto out;
747 }
748 /* FIXME: This should be done in bi_end_io */
749 mark_extents_written(BLK_LSEG2EXT(header->lseg),
750 page->index << PAGE_CACHE_SHIFT,
751 PAGE_CACHE_SIZE);
752
753 bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
754 isect, page, be,
755 bl_end_io_write_zero, par);
756 if (IS_ERR(bio)) {
757 header->pnfs_error = PTR_ERR(bio);
758 bio = NULL;
759 goto out;
760 }
761next_page:
762 isect += PAGE_CACHE_SECTORS;
763 extent_length -= PAGE_CACHE_SECTORS;
764 }
765 if (last)
766 goto write_done;
767 }
768 bio = bl_submit_bio(WRITE, bio);
769
770 /* Middle pages */
771 pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
772 for (i = pg_index; i < wdata->pages.npages; i++) {
773 if (!extent_length) {
774 /* We've used up the previous extent */
775 bl_put_extent(be);
776 bl_put_extent(cow_read);
777 bio = bl_submit_bio(WRITE, bio);
778 /* Get the next one */
779 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
780 isect, &cow_read);
781 if (!be || !is_writable(be, isect)) {
782 header->pnfs_error = -EINVAL;
783 goto out;
784 }
785 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
786 if (likely(!bl_push_one_short_extent(
787 be->be_inval)))
788 par->bse_count++;
789 else {
790 header->pnfs_error = -ENOMEM;
791 goto out;
792 }
793 }
794 extent_length = be->be_length -
795 (isect - be->be_f_offset);
796 }
797
798 dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
799 pg_offset = offset & ~PAGE_CACHE_MASK;
800 if (pg_offset + count > PAGE_CACHE_SIZE)
801 pg_len = PAGE_CACHE_SIZE - pg_offset;
802 else
803 pg_len = count;
804
805 saved_len = pg_len;
806 if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
807 !bl_is_sector_init(be->be_inval, isect)) {
808 ret = bl_read_partial_page_sync(pages[i], cow_read,
809 pg_offset, pg_len, true);
810 if (ret) {
811 dprintk("%s bl_read_partial_page_sync fail %d\n",
812 __func__, ret);
813 header->pnfs_error = ret;
814 goto out;
815 }
816
817 ret = bl_mark_sectors_init(be->be_inval, isect,
818 PAGE_CACHE_SECTORS);
819 if (unlikely(ret)) {
820 dprintk("%s bl_mark_sectors_init fail %d\n",
821 __func__, ret);
822 header->pnfs_error = ret;
823 goto out;
824 }
825
826 /* Expand to full page write */
827 pg_offset = 0;
828 pg_len = PAGE_CACHE_SIZE;
829 } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
830 (pg_len & (SECTOR_SIZE - 1))){
831 /* ahh, nasty case. We have to do sync full sector
832 * read-modify-write cycles.
833 */
834 unsigned int saved_offset = pg_offset;
835 ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
836 pg_len, false);
837 pg_offset = round_down(pg_offset, SECTOR_SIZE);
838 pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
839 - pg_offset;
840 }
841
842
843 bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
844 isect, pages[i], be,
845 bl_end_io_write, par,
846 pg_offset, pg_len);
847 if (IS_ERR(bio)) {
848 header->pnfs_error = PTR_ERR(bio);
849 bio = NULL;
850 goto out;
851 }
852 offset += saved_len;
853 count -= saved_len;
854 isect += PAGE_CACHE_SECTORS;
855 last_isect = isect;
856 extent_length -= PAGE_CACHE_SECTORS;
857 }
858
859 /* Last page inside INVALID extent */
860 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
861 bio = bl_submit_bio(WRITE, bio);
862 temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
863 npg_zero = npg_per_block - do_div(temp, npg_per_block);
864 if (npg_zero < npg_per_block) {
865 last = 1;
866 goto fill_invalid_ext;
867 }
868 }
869
870write_done:
871 wdata->res.count = wdata->args.count;
872out:
873 bl_put_extent(be);
874 bl_put_extent(cow_read);
875 bl_submit_bio(WRITE, bio);
876 put_parallel(par);
877 return PNFS_ATTEMPTED;
878out_mds:
879 bl_put_extent(be);
880 bl_put_extent(cow_read);
881 kfree(par);
882 return PNFS_NOT_ATTEMPTED;
883}
884
885/* FIXME - range ignored */
886static void
887release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
888{
889 int i;
890 struct pnfs_block_extent *be;
891
892 spin_lock(&bl->bl_ext_lock);
893 for (i = 0; i < EXTENT_LISTS; i++) {
894 while (!list_empty(&bl->bl_extents[i])) {
895 be = list_first_entry(&bl->bl_extents[i],
896 struct pnfs_block_extent,
897 be_node);
898 list_del(&be->be_node);
899 bl_put_extent(be);
900 }
901 }
902 spin_unlock(&bl->bl_ext_lock);
903}
904
905static void
906release_inval_marks(struct pnfs_inval_markings *marks)
907{
908 struct pnfs_inval_tracking *pos, *temp;
909 struct pnfs_block_short_extent *se, *stemp;
910
911 list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
912 list_del(&pos->it_link);
913 kfree(pos);
914 }
915
916 list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
917 list_del(&se->bse_node);
918 kfree(se);
919 }
920 return;
921}
922
923static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
924{
925 struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
926
927 dprintk("%s enter\n", __func__);
928 release_extents(bl, NULL);
929 release_inval_marks(&bl->bl_inval);
930 kfree(bl);
931}
932
933static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
934 gfp_t gfp_flags)
935{
936 struct pnfs_block_layout *bl;
937
938 dprintk("%s enter\n", __func__);
939 bl = kzalloc(sizeof(*bl), gfp_flags);
940 if (!bl)
941 return NULL;
942 spin_lock_init(&bl->bl_ext_lock);
943 INIT_LIST_HEAD(&bl->bl_extents[0]);
944 INIT_LIST_HEAD(&bl->bl_extents[1]);
945 INIT_LIST_HEAD(&bl->bl_commit);
946 INIT_LIST_HEAD(&bl->bl_committing);
947 bl->bl_count = 0;
948 bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
949 BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
950 return &bl->bl_layout;
951}
952
953static void bl_free_lseg(struct pnfs_layout_segment *lseg)
954{
955 dprintk("%s enter\n", __func__);
956 kfree(lseg);
957}
958
959/* We pretty much ignore lseg, and store all data layout wide, so we
960 * can correctly merge.
961 */
962static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
963 struct nfs4_layoutget_res *lgr,
964 gfp_t gfp_flags)
965{
966 struct pnfs_layout_segment *lseg;
967 int status;
968
969 dprintk("%s enter\n", __func__);
970 lseg = kzalloc(sizeof(*lseg), gfp_flags);
971 if (!lseg)
972 return ERR_PTR(-ENOMEM);
973 status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
974 if (status) {
975 /* We don't want to call the full-blown bl_free_lseg,
976 * since on error extents were not touched.
977 */
978 kfree(lseg);
979 return ERR_PTR(status);
980 }
981 return lseg;
982}
983
984static void
985bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
986 const struct nfs4_layoutcommit_args *arg)
987{
988 dprintk("%s enter\n", __func__);
989 encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
990}
991
992static void
993bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
994{
995 struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
996
997 dprintk("%s enter\n", __func__);
998 clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
999}
1000
1001static void free_blk_mountid(struct block_mount_id *mid)
1002{
1003 if (mid) {
1004 struct pnfs_block_dev *dev, *tmp;
1005
1006 /* No need to take bm_lock as we are last user freeing bm_devlist */
1007 list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
1008 list_del(&dev->bm_node);
1009 bl_free_block_dev(dev);
1010 }
1011 kfree(mid);
1012 }
1013}
1014
1015/* This is mostly copied from the filelayout_get_device_info function.
1016 * It seems much of this should be at the generic pnfs level.
1017 */
1018static struct pnfs_block_dev *
1019nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
1020 struct nfs4_deviceid *d_id)
1021{
1022 struct pnfs_device *dev;
1023 struct pnfs_block_dev *rv;
1024 u32 max_resp_sz;
1025 int max_pages;
1026 struct page **pages = NULL;
1027 int i, rc;
1028
1029 /*
1030 * Use the session max response size as the basis for setting
1031 * GETDEVICEINFO's maxcount
1032 */
1033 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
1034 max_pages = nfs_page_array_len(0, max_resp_sz);
1035 dprintk("%s max_resp_sz %u max_pages %d\n",
1036 __func__, max_resp_sz, max_pages);
1037
1038 dev = kmalloc(sizeof(*dev), GFP_NOFS);
1039 if (!dev) {
1040 dprintk("%s kmalloc failed\n", __func__);
1041 return ERR_PTR(-ENOMEM);
1042 }
1043
1044 pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
1045 if (pages == NULL) {
1046 kfree(dev);
1047 return ERR_PTR(-ENOMEM);
1048 }
1049 for (i = 0; i < max_pages; i++) {
1050 pages[i] = alloc_page(GFP_NOFS);
1051 if (!pages[i]) {
1052 rv = ERR_PTR(-ENOMEM);
1053 goto out_free;
1054 }
1055 }
1056
1057 memcpy(&dev->dev_id, d_id, sizeof(*d_id));
1058 dev->layout_type = LAYOUT_BLOCK_VOLUME;
1059 dev->pages = pages;
1060 dev->pgbase = 0;
1061 dev->pglen = PAGE_SIZE * max_pages;
1062 dev->mincount = 0;
1063
1064 dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
1065 rc = nfs4_proc_getdeviceinfo(server, dev);
1066 dprintk("%s getdevice info returns %d\n", __func__, rc);
1067 if (rc) {
1068 rv = ERR_PTR(rc);
1069 goto out_free;
1070 }
1071
1072 rv = nfs4_blk_decode_device(server, dev);
1073 out_free:
1074 for (i = 0; i < max_pages; i++)
1075 __free_page(pages[i]);
1076 kfree(pages);
1077 kfree(dev);
1078 return rv;
1079}
1080
1081static int
1082bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
1083{
1084 struct block_mount_id *b_mt_id = NULL;
1085 struct pnfs_devicelist *dlist = NULL;
1086 struct pnfs_block_dev *bdev;
1087 LIST_HEAD(block_disklist);
1088 int status, i;
1089
1090 dprintk("%s enter\n", __func__);
1091
1092 if (server->pnfs_blksize == 0) {
1093 dprintk("%s Server did not return blksize\n", __func__);
1094 return -EINVAL;
1095 }
1096 b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
1097 if (!b_mt_id) {
1098 status = -ENOMEM;
1099 goto out_error;
1100 }
1101 /* Initialize nfs4 block layout mount id */
1102 spin_lock_init(&b_mt_id->bm_lock);
1103 INIT_LIST_HEAD(&b_mt_id->bm_devlist);
1104
1105 dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
1106 if (!dlist) {
1107 status = -ENOMEM;
1108 goto out_error;
1109 }
1110 dlist->eof = 0;
1111 while (!dlist->eof) {
1112 status = nfs4_proc_getdevicelist(server, fh, dlist);
1113 if (status)
1114 goto out_error;
1115 dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
1116 __func__, dlist->num_devs, dlist->eof);
1117 for (i = 0; i < dlist->num_devs; i++) {
1118 bdev = nfs4_blk_get_deviceinfo(server, fh,
1119 &dlist->dev_id[i]);
1120 if (IS_ERR(bdev)) {
1121 status = PTR_ERR(bdev);
1122 goto out_error;
1123 }
1124 spin_lock(&b_mt_id->bm_lock);
1125 list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
1126 spin_unlock(&b_mt_id->bm_lock);
1127 }
1128 }
1129 dprintk("%s SUCCESS\n", __func__);
1130 server->pnfs_ld_data = b_mt_id;
1131
1132 out_return:
1133 kfree(dlist);
1134 return status;
1135
1136 out_error:
1137 free_blk_mountid(b_mt_id);
1138 goto out_return;
1139}
1140
1141static int
1142bl_clear_layoutdriver(struct nfs_server *server)
1143{
1144 struct block_mount_id *b_mt_id = server->pnfs_ld_data;
1145
1146 dprintk("%s enter\n", __func__);
1147 free_blk_mountid(b_mt_id);
1148 dprintk("%s RETURNS\n", __func__);
1149 return 0;
1150}
1151
1152static const struct nfs_pageio_ops bl_pg_read_ops = {
1153 .pg_init = pnfs_generic_pg_init_read,
1154 .pg_test = pnfs_generic_pg_test,
1155 .pg_doio = pnfs_generic_pg_readpages,
1156};
1157
1158static const struct nfs_pageio_ops bl_pg_write_ops = {
1159 .pg_init = pnfs_generic_pg_init_write,
1160 .pg_test = pnfs_generic_pg_test,
1161 .pg_doio = pnfs_generic_pg_writepages,
1162};
1163
1164static struct pnfs_layoutdriver_type blocklayout_type = {
1165 .id = LAYOUT_BLOCK_VOLUME,
1166 .name = "LAYOUT_BLOCK_VOLUME",
1167 .read_pagelist = bl_read_pagelist,
1168 .write_pagelist = bl_write_pagelist,
1169 .alloc_layout_hdr = bl_alloc_layout_hdr,
1170 .free_layout_hdr = bl_free_layout_hdr,
1171 .alloc_lseg = bl_alloc_lseg,
1172 .free_lseg = bl_free_lseg,
1173 .encode_layoutcommit = bl_encode_layoutcommit,
1174 .cleanup_layoutcommit = bl_cleanup_layoutcommit,
1175 .set_layoutdriver = bl_set_layoutdriver,
1176 .clear_layoutdriver = bl_clear_layoutdriver,
1177 .pg_read_ops = &bl_pg_read_ops,
1178 .pg_write_ops = &bl_pg_write_ops,
1179};
1180
1181static const struct rpc_pipe_ops bl_upcall_ops = {
1182 .upcall = rpc_pipe_generic_upcall,
1183 .downcall = bl_pipe_downcall,
1184 .destroy_msg = bl_pipe_destroy_msg,
1185};
1186
1187static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
1188 struct rpc_pipe *pipe)
1189{
1190 struct dentry *dir, *dentry;
1191
1192 dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
1193 if (dir == NULL)
1194 return ERR_PTR(-ENOENT);
1195 dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
1196 dput(dir);
1197 return dentry;
1198}
1199
1200static void nfs4blocklayout_unregister_sb(struct super_block *sb,
1201 struct rpc_pipe *pipe)
1202{
1203 if (pipe->dentry)
1204 rpc_unlink(pipe->dentry);
1205}
1206
1207static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
1208 void *ptr)
1209{
1210 struct super_block *sb = ptr;
1211 struct net *net = sb->s_fs_info;
1212 struct nfs_net *nn = net_generic(net, nfs_net_id);
1213 struct dentry *dentry;
1214 int ret = 0;
1215
1216 if (!try_module_get(THIS_MODULE))
1217 return 0;
1218
1219 if (nn->bl_device_pipe == NULL) {
1220 module_put(THIS_MODULE);
1221 return 0;
1222 }
1223
1224 switch (event) {
1225 case RPC_PIPEFS_MOUNT:
1226 dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
1227 if (IS_ERR(dentry)) {
1228 ret = PTR_ERR(dentry);
1229 break;
1230 }
1231 nn->bl_device_pipe->dentry = dentry;
1232 break;
1233 case RPC_PIPEFS_UMOUNT:
1234 if (nn->bl_device_pipe->dentry)
1235 nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
1236 break;
1237 default:
1238 ret = -ENOTSUPP;
1239 break;
1240 }
1241 module_put(THIS_MODULE);
1242 return ret;
1243}
1244
1245static struct notifier_block nfs4blocklayout_block = {
1246 .notifier_call = rpc_pipefs_event,
1247};
1248
1249static struct dentry *nfs4blocklayout_register_net(struct net *net,
1250 struct rpc_pipe *pipe)
1251{
1252 struct super_block *pipefs_sb;
1253 struct dentry *dentry;
1254
1255 pipefs_sb = rpc_get_sb_net(net);
1256 if (!pipefs_sb)
1257 return NULL;
1258 dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
1259 rpc_put_sb_net(net);
1260 return dentry;
1261}
1262
1263static void nfs4blocklayout_unregister_net(struct net *net,
1264 struct rpc_pipe *pipe)
1265{
1266 struct super_block *pipefs_sb;
1267
1268 pipefs_sb = rpc_get_sb_net(net);
1269 if (pipefs_sb) {
1270 nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
1271 rpc_put_sb_net(net);
1272 }
1273}
1274
1275static int nfs4blocklayout_net_init(struct net *net)
1276{
1277 struct nfs_net *nn = net_generic(net, nfs_net_id);
1278 struct dentry *dentry;
1279
1280 init_waitqueue_head(&nn->bl_wq);
1281 nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
1282 if (IS_ERR(nn->bl_device_pipe))
1283 return PTR_ERR(nn->bl_device_pipe);
1284 dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
1285 if (IS_ERR(dentry)) {
1286 rpc_destroy_pipe_data(nn->bl_device_pipe);
1287 return PTR_ERR(dentry);
1288 }
1289 nn->bl_device_pipe->dentry = dentry;
1290 return 0;
1291}
1292
1293static void nfs4blocklayout_net_exit(struct net *net)
1294{
1295 struct nfs_net *nn = net_generic(net, nfs_net_id);
1296
1297 nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
1298 rpc_destroy_pipe_data(nn->bl_device_pipe);
1299 nn->bl_device_pipe = NULL;
1300}
1301
1302static struct pernet_operations nfs4blocklayout_net_ops = {
1303 .init = nfs4blocklayout_net_init,
1304 .exit = nfs4blocklayout_net_exit,
1305};
1306
1307static int __init nfs4blocklayout_init(void)
1308{
1309 int ret;
1310
1311 dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
1312
1313 ret = pnfs_register_layoutdriver(&blocklayout_type);
1314 if (ret)
1315 goto out;
1316
1317 ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
1318 if (ret)
1319 goto out_remove;
1320 ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
1321 if (ret)
1322 goto out_notifier;
1323out:
1324 return ret;
1325
1326out_notifier:
1327 rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
1328out_remove:
1329 pnfs_unregister_layoutdriver(&blocklayout_type);
1330 return ret;
1331}
1332
1333static void __exit nfs4blocklayout_exit(void)
1334{
1335 dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
1336 __func__);
1337
1338 rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
1339 unregister_pernet_subsys(&nfs4blocklayout_net_ops);
1340 pnfs_unregister_layoutdriver(&blocklayout_type);
1341}
1342
1343MODULE_ALIAS("nfs-layouttype4-3");
1344
1345module_init(nfs4blocklayout_init);
1346module_exit(nfs4blocklayout_exit);