direct_IO: use iov_iter_rw() instead of rw everywhere
drivers/staging/lustre/lustre/llite/rw26.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lustre/llite/rw26.c
 *
 * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/uaccess.h>

#include <linux/migrate.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"

/**
 * Implements Linux VM address_space::invalidatepage() method. This method is
 * called when a page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * final iput(), umount, or memory pressure induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * non-page-aligned truncate). Lustre leaves the partially truncated page in
 * the cache, relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
                              unsigned int length)
{
        struct inode *inode;
        struct lu_env *env;
        struct cl_page *page;
        struct cl_object *obj;
        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below, because they run with the page locked and all of our I/O
         * happens with the page locked as well.
         */
        if (offset == 0 && length == PAGE_CACHE_SIZE) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        lu_ref_add(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_delete(env, page);
                                        lu_ref_del(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_put(env, page);
                                }
                        } else {
                                LASSERT(vmpage->private == 0);
                        }
                        cl_env_put(env, &refcheck);
                }
        }
}

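/*
 * Implements Linux VM address_space::releasepage() method. Returns 1 when
 * the page (and its cl_page, if any) may be freed, 0 when it is still busy:
 * under writeback, dirty, holding extra references, or when no lu_env can
 * be allocated to drop the cl_page reference.
 */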
#ifdef HAVE_RELEASEPAGE_WITH_INT
#define RELEASEPAGE_ARG_TYPE int
#else
#define RELEASEPAGE_ARG_TYPE gfp_t
#endif
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
        struct cl_env_nest nest;
        struct lu_env *env;
        struct cl_object *obj;
        struct cl_page *page;
        struct address_space *mapping;
        int result;

        LASSERT(PageLocked(vmpage));
        if (PageWriteback(vmpage) || PageDirty(vmpage))
                return 0;

        mapping = vmpage->mapping;
        if (mapping == NULL)
                return 1;

        obj = ll_i2info(mapping->host)->lli_clob;
        if (obj == NULL)
                return 1;

        /* 1 for page allocator, 1 for cl_page and 1 for page cache */
        if (page_count(vmpage) > 3)
                return 0;

        /* TODO: determine what gfp flags should be used from @gfp_mask. */
        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                /* If we can't allocate an env, we won't be able to call
                 * cl_page_put() later on, which means the page refcount
                 * held by cl_page can never be dropped, so ask the kernel
                 * not to free this page. */
                return 0;

        page = cl_vmpage_page(vmpage, obj);
        result = page == NULL;
        if (page != NULL) {
                if (!cl_page_in_use(page)) {
                        result = 1;
                        cl_page_delete(env, page);
                }
                cl_page_put(env, page);
        }
        cl_env_nested_put(&nest, env);
        return result;
}

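/*
 * Implements address_space::set_page_dirty(). The vvp-based implementation
 * below is compiled out under #if 0; for now this simply falls through to
 * __set_page_dirty_nobuffers().
 */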
static int ll_set_page_dirty(struct page *vmpage)
{
#if 0
        struct cl_page *page = vvp_vmpage_page_transient(vmpage);
        struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
        struct vvp_page *cpg;

        /*
         * XXX should page method be called here?
         */
        LASSERT(&obj->co_cl == page->cp_obj);
        cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
        /*
         * XXX cannot do much here, because page is possibly not locked:
         * sys_munmap()->...
         *     ->unmap_page_range()->zap_pte_range()->set_page_dirty().
         */
        vvp_write_pending(obj, cpg);
#endif
        return __set_page_dirty_nobuffers(vmpage);
}

#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)

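/*
 * ll_get_user_pages - pin the user pages backing a direct I/O buffer.
 * Returns the number of pages pinned by get_user_pages_fast() or a negative
 * errno; on success *pages holds the allocated page array and *max_pages
 * the number of pages spanned by [user_addr, user_addr + size).
 */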
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                    size_t size, struct page ***pages,
                                    int *max_pages)
{
        int result = -ENOMEM;

        /* set an arbitrary limit to prevent arithmetic overflow */
        if (size > MAX_DIRECTIO_SIZE) {
                *pages = NULL;
                return -EFBIG;
        }

        *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        *max_pages -= user_addr >> PAGE_CACHE_SHIFT;

        OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
        if (*pages) {
                result = get_user_pages_fast(user_addr, *max_pages,
                                             (rw == READ), *pages);
                if (unlikely(result <= 0))
                        OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
        }

        return result;
}

213
214/* ll_free_user_pages - tear down page struct array
215 * @pages: array of page struct pointers underlying target buffer */
216static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
217{
218 int i;
219
220 for (i = 0; i < npages; i++) {
d7e09d03
PT
221 if (do_dirty)
222 set_page_dirty_lock(pages[i]);
223 page_cache_release(pages[i]);
224 }
91f79c43 225 kvfree(pages);
d7e09d03
PT
226}
227
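/*
 * ll_direct_rw_pages - submit the pinned pages described by @pv as a single
 * synchronous cl_io transfer. If cl_page_find() returns a CPT_CACHEABLE page
 * instead of a transient one, the data is copied through kmap_atomic() and,
 * for reads, the page is dropped from the queue. Returns pv->ldp_size on
 * success or a negative errno.
 */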
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
        struct cl_page *clp;
        struct cl_2queue *queue;
        struct cl_object *obj = io->ci_obj;
        int i;
        ssize_t rc = 0;
        loff_t file_offset = pv->ldp_start_offset;
        long size = pv->ldp_size;
        int page_count = pv->ldp_nr;
        struct page **pages = pv->ldp_pages;
        long page_size = cl_page_size(obj);
        bool do_io;
        int io_pages = 0;

        queue = &io->ci_queue;
        cl_2queue_init(queue);
        for (i = 0; i < page_count; i++) {
                if (pv->ldp_offsets)
                        file_offset = pv->ldp_offsets[i];

                LASSERT(!(file_offset & (page_size - 1)));
                clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                   pv->ldp_pages[i], CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                do_io = true;

                /* check the page type: if the page is a host page, then
                 * write it directly */
                if (clp->cp_type == CPT_CACHEABLE) {
                        struct page *vmpage = cl_page_vmpage(env, clp);
                        struct page *src_page;
                        struct page *dst_page;
                        void *src;
                        void *dst;

                        src_page = (rw == WRITE) ? pages[i] : vmpage;
                        dst_page = (rw == WRITE) ? vmpage : pages[i];

                        src = kmap_atomic(src_page);
                        dst = kmap_atomic(dst_page);
                        memcpy(dst, src, min(page_size, size));
                        kunmap_atomic(dst);
                        kunmap_atomic(src);

                        /* make sure the page will be added to the transfer by
                         * cl_io_submit()->...->vvp_page_prep_write(). */
                        if (rw == WRITE)
                                set_page_dirty(vmpage);

                        if (rw == READ) {
                                /* do not issue the page for read, since it
                                 * may re-read a readahead page which does
                                 * NOT have the uptodate bit set. */
                                cl_page_disown(env, io, clp);
                                do_io = false;
                        }
                }

                if (likely(do_io)) {
                        cl_2queue_add(queue, clp);

                        /*
                         * Set page clip to tell the transfer formation engine
                         * that the page has to be sent even if it is beyond
                         * KMS.
                         */
                        cl_page_clip(env, clp, 0, min(size, page_size));

                        ++io_pages;
                }

                /* drop the reference count for cl_page_find */
                cl_page_put(env, clp);
                size -= page_size;
                file_offset += page_size;
        }

        if (rc == 0 && io_pages) {
                rc = cl_io_submit_sync(env, io,
                                       rw == READ ? CRT_READ : CRT_WRITE,
                                       queue, 0);
        }
        if (rc == 0)
                rc = pv->ldp_size;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        return rc;
}
EXPORT_SYMBOL(ll_direct_rw_pages);

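/* Wrap one contiguous run of pinned pages in a ll_dio_pages descriptor and
 * hand it to ll_direct_rw_pages(). */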
static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
                                   int rw, struct inode *inode,
                                   struct address_space *mapping,
                                   size_t size, loff_t file_offset,
                                   struct page **pages, int page_count)
{
        struct ll_dio_pages pvec = { .ldp_pages = pages,
                                     .ldp_nr = page_count,
                                     .ldp_size = size,
                                     .ldp_offsets = NULL,
                                     .ldp_start_offset = file_offset
                                   };

        return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}

#ifdef KMALLOC_MAX_SIZE
#define MAX_MALLOC KMALLOC_MAX_SIZE
#else
#define MAX_MALLOC (128 * 1024)
#endif

/* This is the maximum size of a single O_DIRECT request, based on the
 * kmalloc limit. We need to fit all of the brw_page structs, each one
 * representing PAGE_SIZE worth of user data, into a single buffer, and
 * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
                      ~(DT_MAX_BRW_SIZE - 1))
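/*
 * Implements address_space::direct_IO() for llite. The iov_iter is consumed
 * in chunks of at most MAX_DIO_SIZE bytes: each chunk is pinned with
 * iov_iter_get_pages_alloc(), submitted synchronously through
 * ll_direct_IO_26_seg(), and released again before the next chunk. On
 * -ENOMEM the chunk size is halved and the chunk is retried.
 */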
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                               struct iov_iter *iter, loff_t file_offset)
{
        struct lu_env *env;
        struct cl_io *io;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct ccc_object *obj = cl_inode2ccc(inode);
        ssize_t count = iov_iter_count(iter);
        ssize_t tot_bytes = 0, result = 0;
        struct ll_inode_info *lli = ll_i2info(inode);
        long size = MAX_DIO_SIZE;
        int refcheck;

        if (!lli->lli_has_smd)
                return -EBADF;

        /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
        if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
                return -EINVAL;

        CDEBUG(D_VFSTRACE,
               "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
               inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
               file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
               MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);

        /* Check that all user buffers are aligned as well */
        if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
                return -EINVAL;

        env = cl_env_get(&refcheck);
        LASSERT(!IS_ERR(env));
        io = ccc_env_io(env)->cui_cl.cis_io;
        LASSERT(io != NULL);

        /* 0. Need locking between buffered and direct access, and to guard
         *    against races with size changes by concurrent truncates and
         *    writes.
         * 1. Need inode mutex to operate transient pages.
         */
        if (iov_iter_rw(iter) == READ)
                mutex_lock(&inode->i_mutex);

        LASSERT(obj->cob_transient_pages == 0);
        while (iov_iter_count(iter)) {
                struct page **pages;
                size_t offs;

                count = min_t(size_t, iov_iter_count(iter), size);
                if (iov_iter_rw(iter) == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + count > i_size_read(inode))
                                count = i_size_read(inode) - file_offset;
                }

                result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                if (likely(result > 0)) {
                        int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);

                        result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
                                                     inode, file->f_mapping,
                                                     result, file_offset, pages,
                                                     n);
                        ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
                }
                if (unlikely(result <= 0)) {
                        /* If we can't allocate a large enough buffer
                         * for the request, shrink it to a smaller
                         * PAGE_SIZE multiple and try again.
                         * We should always be able to kmalloc for a
                         * page worth of page pointers = 4MB on i386. */
                        if (result == -ENOMEM &&
                            size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
                                    PAGE_CACHE_SIZE) {
                                size = ((((size / 2) - 1) |
                                         ~CFS_PAGE_MASK) + 1) &
                                        CFS_PAGE_MASK;
                                CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
                                       size);
                                continue;
                        }

                        goto out;
                }
                iov_iter_advance(iter, result);
                tot_bytes += result;
                file_offset += result;
        }
out:
        LASSERT(obj->cob_transient_pages == 0);
        if (iov_iter_rw(iter) == READ)
                mutex_unlock(&inode->i_mutex);

        if (tot_bytes > 0) {
                if (iov_iter_rw(iter) == WRITE) {
                        struct lov_stripe_md *lsm;

                        lsm = ccc_inode_lsm_get(inode);
                        LASSERT(lsm != NULL);
                        lov_stripe_lock(lsm);
                        obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
                        lov_stripe_unlock(lsm);
                        ccc_inode_lsm_put(inode, lsm);
                }
        }

        cl_env_put(env, &refcheck);
        return tot_bytes ? : result;
}

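/*
 * ->write_begin()/->write_end() for the buffered write path: lock the page
 * cache page covering [pos, pos + len) and delegate the real work to the
 * llite helpers ll_prepare_write() and ll_commit_write().
 */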
static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;
        int rc;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;

        rc = ll_prepare_write(file, page, from, from + len);
        if (rc) {
                unlock_page(page);
                page_cache_release(page);
        }
        return rc;
}

static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        int rc;

        rc = ll_commit_write(file, page, from, from + copied);
        unlock_page(page);
        page_cache_release(page);

        return rc ?: copied;
}

#ifdef CONFIG_MIGRATION
static int ll_migratepage(struct address_space *mapping,
                          struct page *newpage, struct page *page,
                          enum migrate_mode mode)
{
        /* Always fail page migration until we have a proper implementation */
        return -EIO;
}
#endif

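/*
 * The address_space operations exported to the VFS. When MS_HAS_NEW_AOPS is
 * defined, the extended table is used instead and the legacy
 * prepare_write/commit_write pair is carried in orig_aops.
 */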
#ifndef MS_HAS_NEW_AOPS
const struct address_space_operations ll_aops = {
        .readpage = ll_readpage,
        .direct_IO = ll_direct_IO_26,
        .writepage = ll_writepage,
        .writepages = ll_writepages,
        .set_page_dirty = ll_set_page_dirty,
        .write_begin = ll_write_begin,
        .write_end = ll_write_end,
        .invalidatepage = ll_invalidatepage,
        .releasepage = (void *)ll_releasepage,
#ifdef CONFIG_MIGRATION
        .migratepage = ll_migratepage,
#endif
};
#else
const struct address_space_operations_ext ll_aops = {
        .orig_aops.readpage = ll_readpage,
/*      .orig_aops.readpages = ll_readpages, */
        .orig_aops.direct_IO = ll_direct_IO_26,
        .orig_aops.writepage = ll_writepage,
        .orig_aops.writepages = ll_writepages,
        .orig_aops.set_page_dirty = ll_set_page_dirty,
        .orig_aops.prepare_write = ll_prepare_write,
        .orig_aops.commit_write = ll_commit_write,
        .orig_aops.invalidatepage = ll_invalidatepage,
        .orig_aops.releasepage = ll_releasepage,
#ifdef CONFIG_MIGRATION
        .orig_aops.migratepage = ll_migratepage,
#endif
        .write_begin = ll_write_begin,
        .write_end = ll_write_end
};
#endif