Commit | Line | Data |
---|---|---|
d7e09d03 PT |
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2011, 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * lustre/lustre/llite/rw26.c | |
37 | * | |
38 | * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version | |
39 | */ | |
40 | ||
41 | #include <linux/kernel.h> | |
42 | #include <linux/mm.h> | |
43 | #include <linux/string.h> | |
44 | #include <linux/stat.h> | |
45 | #include <linux/errno.h> | |
46 | #include <linux/unistd.h> | |
9fb186cf | 47 | #include <linux/uaccess.h> |
d7e09d03 PT |
48 | |
49 | #include <linux/migrate.h> | |
50 | #include <linux/fs.h> | |
51 | #include <linux/buffer_head.h> | |
52 | #include <linux/mpage.h> | |
53 | #include <linux/writeback.h> | |
d7e09d03 PT |
54 | #include <linux/pagemap.h> |
55 | ||
56 | #define DEBUG_SUBSYSTEM S_LLITE | |
57 | ||
67a235f5 | 58 | #include "../include/lustre_lite.h" |
d7e09d03 | 59 | #include "llite_internal.h" |
67a235f5 | 60 | #include "../include/linux/lustre_compat25.h" |
d7e09d03 PT |
61 | |
62 | /** | |
63 | * Implements Linux VM address_space::invalidatepage() method. This method is | |
64 | * called when the page is truncate from a file, either as a result of | |
65 | * explicit truncate, or when inode is removed from memory (as a result of | |
66 | * final iput(), umount, or memory pressure induced icache shrinking). | |
67 | * | |
68 | * [0, offset] bytes of the page remain valid (this is for a case of not-page | |
69 | * aligned truncate). Lustre leaves partially truncated page in the cache, | |
70 | * relying on struct inode::i_size to limit further accesses. | |
71 | */ | |
5237c441 SR |
72 | static void ll_invalidatepage(struct page *vmpage, unsigned int offset, |
73 | unsigned int length) | |
d7e09d03 PT |
74 | { |
75 | struct inode *inode; | |
76 | struct lu_env *env; | |
77 | struct cl_page *page; | |
78 | struct cl_object *obj; | |
79 | ||
80 | int refcheck; | |
81 | ||
82 | LASSERT(PageLocked(vmpage)); | |
83 | LASSERT(!PageWriteback(vmpage)); | |
84 | ||
85 | /* | |
86 | * It is safe to not check anything in invalidatepage/releasepage | |
87 | * below because they are run with page locked and all our io is | |
88 | * happening with locked page too | |
89 | */ | |
5237c441 | 90 | if (offset == 0 && length == PAGE_CACHE_SIZE) { |
d7e09d03 PT |
91 | env = cl_env_get(&refcheck); |
92 | if (!IS_ERR(env)) { | |
93 | inode = vmpage->mapping->host; | |
94 | obj = ll_i2info(inode)->lli_clob; | |
95 | if (obj != NULL) { | |
96 | page = cl_vmpage_page(vmpage, obj); | |
97 | if (page != NULL) { | |
98 | lu_ref_add(&page->cp_reference, | |
99 | "delete", vmpage); | |
100 | cl_page_delete(env, page); | |
101 | lu_ref_del(&page->cp_reference, | |
102 | "delete", vmpage); | |
103 | cl_page_put(env, page); | |
104 | } | |
105 | } else | |
106 | LASSERT(vmpage->private == 0); | |
107 | cl_env_put(env, &refcheck); | |
108 | } | |
109 | } | |
110 | } | |
111 | ||
112 | #ifdef HAVE_RELEASEPAGE_WITH_INT | |
113 | #define RELEASEPAGE_ARG_TYPE int | |
114 | #else | |
115 | #define RELEASEPAGE_ARG_TYPE gfp_t | |
116 | #endif | |
117 | static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) | |
118 | { | |
119 | struct cl_env_nest nest; | |
120 | struct lu_env *env; | |
121 | struct cl_object *obj; | |
122 | struct cl_page *page; | |
123 | struct address_space *mapping; | |
124 | int result; | |
125 | ||
126 | LASSERT(PageLocked(vmpage)); | |
127 | if (PageWriteback(vmpage) || PageDirty(vmpage)) | |
128 | return 0; | |
129 | ||
130 | mapping = vmpage->mapping; | |
131 | if (mapping == NULL) | |
132 | return 1; | |
133 | ||
134 | obj = ll_i2info(mapping->host)->lli_clob; | |
135 | if (obj == NULL) | |
136 | return 1; | |
137 | ||
138 | /* 1 for page allocator, 1 for cl_page and 1 for page cache */ | |
139 | if (page_count(vmpage) > 3) | |
140 | return 0; | |
141 | ||
142 | /* TODO: determine what gfp should be used by @gfp_mask. */ | |
143 | env = cl_env_nested_get(&nest); | |
144 | if (IS_ERR(env)) | |
145 | /* If we can't allocate an env we won't call cl_page_put() | |
146 | * later on which further means it's impossible to drop | |
147 | * page refcount by cl_page, so ask kernel to not free | |
148 | * this page. */ | |
149 | return 0; | |
150 | ||
151 | page = cl_vmpage_page(vmpage, obj); | |
152 | result = page == NULL; | |
153 | if (page != NULL) { | |
154 | if (!cl_page_in_use(page)) { | |
155 | result = 1; | |
156 | cl_page_delete(env, page); | |
157 | } | |
158 | cl_page_put(env, page); | |
159 | } | |
160 | cl_env_nested_put(&nest, env); | |
161 | return result; | |
162 | } | |
163 | ||
/**
 * Implements address_space::set_page_dirty().
 *
 * Delegates to the generic __set_page_dirty_nobuffers().  A previous
 * (disabled) implementation tried to notify clio of the pending write via
 * vvp_write_pending(), but that cannot be done safely on this path because
 * the page is possibly not locked here:
 *   sys_munmap()->...->unmap_page_range()->zap_pte_range()->set_page_dirty()
 * so the dead `#if 0` block has been removed.
 */
static int ll_set_page_dirty(struct page *vmpage)
{
	return __set_page_dirty_nobuffers(vmpage);
}
185 | ||
907cd248 | 186 | #define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL) |
d7e09d03 PT |
187 | |
188 | static inline int ll_get_user_pages(int rw, unsigned long user_addr, | |
189 | size_t size, struct page ***pages, | |
190 | int *max_pages) | |
191 | { | |
192 | int result = -ENOMEM; | |
193 | ||
194 | /* set an arbitrary limit to prevent arithmetic overflow */ | |
195 | if (size > MAX_DIRECTIO_SIZE) { | |
196 | *pages = NULL; | |
197 | return -EFBIG; | |
198 | } | |
199 | ||
200 | *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | |
201 | *max_pages -= user_addr >> PAGE_CACHE_SHIFT; | |
202 | ||
e958f49b | 203 | *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); |
d7e09d03 | 204 | if (*pages) { |
d1a168ec JK |
205 | result = get_user_pages_fast(user_addr, *max_pages, |
206 | (rw == READ), *pages); | |
d7e09d03 | 207 | if (unlikely(result <= 0)) |
e958f49b | 208 | kvfree(*pages); |
d7e09d03 PT |
209 | } |
210 | ||
211 | return result; | |
212 | } | |
213 | ||
/**
 * ll_free_user_pages - tear down page struct array
 * @pages:    array of page struct pointers underlying target buffer
 * @npages:   number of entries in @pages
 * @do_dirty: non-zero when the pages were written to and must be re-dirtied
 */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	struct page **pp = pages;
	struct page **end = pages + npages;

	while (pp < end) {
		if (do_dirty)
			set_page_dirty_lock(*pp);
		page_cache_release(*pp);
		pp++;
	}
	kvfree(pages);
}
227 | ||
228 | ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, | |
229 | int rw, struct inode *inode, | |
230 | struct ll_dio_pages *pv) | |
231 | { | |
232 | struct cl_page *clp; | |
233 | struct cl_2queue *queue; | |
234 | struct cl_object *obj = io->ci_obj; | |
235 | int i; | |
236 | ssize_t rc = 0; | |
237 | loff_t file_offset = pv->ldp_start_offset; | |
238 | long size = pv->ldp_size; | |
239 | int page_count = pv->ldp_nr; | |
240 | struct page **pages = pv->ldp_pages; | |
241 | long page_size = cl_page_size(obj); | |
242 | bool do_io; | |
243 | int io_pages = 0; | |
d7e09d03 PT |
244 | |
245 | queue = &io->ci_queue; | |
246 | cl_2queue_init(queue); | |
247 | for (i = 0; i < page_count; i++) { | |
248 | if (pv->ldp_offsets) | |
249 | file_offset = pv->ldp_offsets[i]; | |
250 | ||
251 | LASSERT(!(file_offset & (page_size - 1))); | |
252 | clp = cl_page_find(env, obj, cl_index(obj, file_offset), | |
253 | pv->ldp_pages[i], CPT_TRANSIENT); | |
254 | if (IS_ERR(clp)) { | |
255 | rc = PTR_ERR(clp); | |
256 | break; | |
257 | } | |
258 | ||
259 | rc = cl_page_own(env, io, clp); | |
260 | if (rc) { | |
261 | LASSERT(clp->cp_state == CPS_FREEING); | |
262 | cl_page_put(env, clp); | |
263 | break; | |
264 | } | |
265 | ||
266 | do_io = true; | |
267 | ||
268 | /* check the page type: if the page is a host page, then do | |
269 | * write directly */ | |
270 | if (clp->cp_type == CPT_CACHEABLE) { | |
271 | struct page *vmpage = cl_page_vmpage(env, clp); | |
272 | struct page *src_page; | |
273 | struct page *dst_page; | |
274 | void *src; | |
275 | void *dst; | |
276 | ||
277 | src_page = (rw == WRITE) ? pages[i] : vmpage; | |
278 | dst_page = (rw == WRITE) ? vmpage : pages[i]; | |
279 | ||
5e8ebf13 ZH |
280 | src = kmap_atomic(src_page); |
281 | dst = kmap_atomic(dst_page); | |
d7e09d03 | 282 | memcpy(dst, src, min(page_size, size)); |
5e8ebf13 ZH |
283 | kunmap_atomic(dst); |
284 | kunmap_atomic(src); | |
d7e09d03 PT |
285 | |
286 | /* make sure page will be added to the transfer by | |
287 | * cl_io_submit()->...->vvp_page_prep_write(). */ | |
288 | if (rw == WRITE) | |
289 | set_page_dirty(vmpage); | |
290 | ||
291 | if (rw == READ) { | |
292 | /* do not issue the page for read, since it | |
293 | * may reread a ra page which has NOT uptodate | |
294 | * bit set. */ | |
295 | cl_page_disown(env, io, clp); | |
296 | do_io = false; | |
297 | } | |
298 | } | |
299 | ||
300 | if (likely(do_io)) { | |
53f1a127 SB |
301 | /* |
302 | * Add a page to the incoming page list of 2-queue. | |
303 | */ | |
304 | cl_page_list_add(&queue->c2_qin, clp); | |
d7e09d03 PT |
305 | |
306 | /* | |
307 | * Set page clip to tell transfer formation engine | |
308 | * that page has to be sent even if it is beyond KMS. | |
309 | */ | |
310 | cl_page_clip(env, clp, 0, min(size, page_size)); | |
311 | ||
312 | ++io_pages; | |
313 | } | |
314 | ||
315 | /* drop the reference count for cl_page_find */ | |
316 | cl_page_put(env, clp); | |
317 | size -= page_size; | |
318 | file_offset += page_size; | |
319 | } | |
320 | ||
321 | if (rc == 0 && io_pages) { | |
322 | rc = cl_io_submit_sync(env, io, | |
323 | rw == READ ? CRT_READ : CRT_WRITE, | |
324 | queue, 0); | |
325 | } | |
326 | if (rc == 0) | |
327 | rc = pv->ldp_size; | |
328 | ||
329 | cl_2queue_discard(env, io, queue); | |
330 | cl_2queue_disown(env, io, queue); | |
331 | cl_2queue_fini(env, queue); | |
0a3bdb00 | 332 | return rc; |
d7e09d03 PT |
333 | } |
334 | EXPORT_SYMBOL(ll_direct_rw_pages); | |
335 | ||
336 | static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, | |
337 | int rw, struct inode *inode, | |
338 | struct address_space *mapping, | |
339 | size_t size, loff_t file_offset, | |
340 | struct page **pages, int page_count) | |
341 | { | |
342 | struct ll_dio_pages pvec = { .ldp_pages = pages, | |
343 | .ldp_nr = page_count, | |
344 | .ldp_size = size, | |
345 | .ldp_offsets = NULL, | |
346 | .ldp_start_offset = file_offset | |
347 | }; | |
348 | ||
349 | return ll_direct_rw_pages(env, io, rw, inode, &pvec); | |
350 | } | |
351 | ||
352 | #ifdef KMALLOC_MAX_SIZE | |
353 | #define MAX_MALLOC KMALLOC_MAX_SIZE | |
354 | #else | |
355 | #define MAX_MALLOC (128 * 1024) | |
356 | #endif | |
357 | ||
358 | /* This is the maximum size of a single O_DIRECT request, based on the | |
359 | * kmalloc limit. We need to fit all of the brw_page structs, each one | |
360 | * representing PAGE_SIZE worth of user data, into a single buffer, and | |
361 | * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is | |
362 | * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ | |
363 | #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ | |
364 | ~(DT_MAX_BRW_SIZE - 1)) | |
22c6186e OS |
365 | static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, |
366 | loff_t file_offset) | |
d7e09d03 PT |
367 | { |
368 | struct lu_env *env; | |
369 | struct cl_io *io; | |
370 | struct file *file = iocb->ki_filp; | |
371 | struct inode *inode = file->f_mapping->host; | |
372 | struct ccc_object *obj = cl_inode2ccc(inode); | |
91f79c43 AV |
373 | ssize_t count = iov_iter_count(iter); |
374 | ssize_t tot_bytes = 0, result = 0; | |
d7e09d03 | 375 | struct ll_inode_info *lli = ll_i2info(inode); |
d7e09d03 PT |
376 | long size = MAX_DIO_SIZE; |
377 | int refcheck; | |
d7e09d03 PT |
378 | |
379 | if (!lli->lli_has_smd) | |
0a3bdb00 | 380 | return -EBADF; |
d7e09d03 PT |
381 | |
382 | /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ | |
383 | if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK)) | |
0a3bdb00 | 384 | return -EINVAL; |
d7e09d03 | 385 | |
b41a1fe0 GU |
386 | CDEBUG(D_VFSTRACE, |
387 | "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", | |
d7e09d03 PT |
388 | inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, |
389 | file_offset, file_offset, count >> PAGE_CACHE_SHIFT, | |
390 | MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); | |
391 | ||
392 | /* Check that all user buffers are aligned as well */ | |
886a3911 AV |
393 | if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) |
394 | return -EINVAL; | |
d7e09d03 PT |
395 | |
396 | env = cl_env_get(&refcheck); | |
397 | LASSERT(!IS_ERR(env)); | |
398 | io = ccc_env_io(env)->cui_cl.cis_io; | |
399 | LASSERT(io != NULL); | |
400 | ||
401 | /* 0. Need locking between buffered and direct access. and race with | |
402 | * size changing by concurrent truncates and writes. | |
403 | * 1. Need inode mutex to operate transient pages. | |
404 | */ | |
6f673763 | 405 | if (iov_iter_rw(iter) == READ) |
5955102c | 406 | inode_lock(inode); |
d7e09d03 PT |
407 | |
408 | LASSERT(obj->cob_transient_pages == 0); | |
91f79c43 AV |
409 | while (iov_iter_count(iter)) { |
410 | struct page **pages; | |
411 | size_t offs; | |
d7e09d03 | 412 | |
91f79c43 | 413 | count = min_t(size_t, iov_iter_count(iter), size); |
6f673763 | 414 | if (iov_iter_rw(iter) == READ) { |
d7e09d03 PT |
415 | if (file_offset >= i_size_read(inode)) |
416 | break; | |
91f79c43 AV |
417 | if (file_offset + count > i_size_read(inode)) |
418 | count = i_size_read(inode) - file_offset; | |
d7e09d03 PT |
419 | } |
420 | ||
91f79c43 AV |
421 | result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); |
422 | if (likely(result > 0)) { | |
ef96fddd | 423 | int n = DIV_ROUND_UP(result + offs, PAGE_SIZE); |
50ffcb7e | 424 | |
6f673763 OS |
425 | result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), |
426 | inode, file->f_mapping, | |
427 | result, file_offset, pages, | |
428 | n); | |
429 | ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); | |
91f79c43 AV |
430 | } |
431 | if (unlikely(result <= 0)) { | |
432 | /* If we can't allocate a large enough buffer | |
433 | * for the request, shrink it to a smaller | |
434 | * PAGE_SIZE multiple and try again. | |
435 | * We should always be able to kmalloc for a | |
436 | * page worth of page pointers = 4MB on i386. */ | |
437 | if (result == -ENOMEM && | |
438 | size > (PAGE_CACHE_SIZE / sizeof(*pages)) * | |
439 | PAGE_CACHE_SIZE) { | |
440 | size = ((((size / 2) - 1) | | |
441 | ~CFS_PAGE_MASK) + 1) & | |
442 | CFS_PAGE_MASK; | |
1d8cb70c | 443 | CDEBUG(D_VFSTRACE, "DIO size now %lu\n", |
91f79c43 AV |
444 | size); |
445 | continue; | |
d7e09d03 PT |
446 | } |
447 | ||
34e1f2bb | 448 | goto out; |
d7e09d03 | 449 | } |
91f79c43 AV |
450 | iov_iter_advance(iter, result); |
451 | tot_bytes += result; | |
452 | file_offset += result; | |
d7e09d03 PT |
453 | } |
454 | out: | |
455 | LASSERT(obj->cob_transient_pages == 0); | |
6f673763 | 456 | if (iov_iter_rw(iter) == READ) |
5955102c | 457 | inode_unlock(inode); |
d7e09d03 PT |
458 | |
459 | if (tot_bytes > 0) { | |
6f673763 | 460 | if (iov_iter_rw(iter) == WRITE) { |
d7e09d03 PT |
461 | struct lov_stripe_md *lsm; |
462 | ||
463 | lsm = ccc_inode_lsm_get(inode); | |
464 | LASSERT(lsm != NULL); | |
465 | lov_stripe_lock(lsm); | |
466 | obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0); | |
467 | lov_stripe_unlock(lsm); | |
468 | ccc_inode_lsm_put(inode, lsm); | |
469 | } | |
470 | } | |
471 | ||
472 | cl_env_put(env, &refcheck); | |
0a3bdb00 | 473 | return tot_bytes ? : result; |
d7e09d03 PT |
474 | } |
475 | ||
476 | static int ll_write_begin(struct file *file, struct address_space *mapping, | |
477 | loff_t pos, unsigned len, unsigned flags, | |
478 | struct page **pagep, void **fsdata) | |
479 | { | |
480 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | |
481 | struct page *page; | |
482 | int rc; | |
483 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); | |
d7e09d03 PT |
484 | |
485 | page = grab_cache_page_write_begin(mapping, index, flags); | |
486 | if (!page) | |
0a3bdb00 | 487 | return -ENOMEM; |
d7e09d03 PT |
488 | |
489 | *pagep = page; | |
490 | ||
491 | rc = ll_prepare_write(file, page, from, from + len); | |
492 | if (rc) { | |
493 | unlock_page(page); | |
494 | page_cache_release(page); | |
495 | } | |
0a3bdb00 | 496 | return rc; |
d7e09d03 PT |
497 | } |
498 | ||
499 | static int ll_write_end(struct file *file, struct address_space *mapping, | |
500 | loff_t pos, unsigned len, unsigned copied, | |
501 | struct page *page, void *fsdata) | |
502 | { | |
503 | unsigned from = pos & (PAGE_CACHE_SIZE - 1); | |
504 | int rc; | |
505 | ||
506 | rc = ll_commit_write(file, page, from, from + copied); | |
507 | unlock_page(page); | |
508 | page_cache_release(page); | |
509 | ||
510 | return rc ?: copied; | |
511 | } | |
512 | ||
#ifdef CONFIG_MIGRATION
/*
 * Implements address_space::migratepage().
 * Always fail page migration until we have a proper implementation.
 */
static int ll_migratepage(struct address_space *mapping,
			  struct page *newpage, struct page *page,
			  enum migrate_mode mode)
{
	return -EIO;
}
#endif
523 | ||
2d95f10e JH |
524 | const struct address_space_operations ll_aops = { |
525 | .readpage = ll_readpage, | |
d7e09d03 PT |
526 | .direct_IO = ll_direct_IO_26, |
527 | .writepage = ll_writepage, | |
528 | .writepages = ll_writepages, | |
529 | .set_page_dirty = ll_set_page_dirty, | |
530 | .write_begin = ll_write_begin, | |
531 | .write_end = ll_write_end, | |
532 | .invalidatepage = ll_invalidatepage, | |
533 | .releasepage = (void *)ll_releasepage, | |
534 | #ifdef CONFIG_MIGRATION | |
535 | .migratepage = ll_migratepage, | |
536 | #endif | |
d7e09d03 | 537 | }; |