/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/rw.c
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
/* current_is_kswapd() */
#include <linux/swap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"

/**
 * Finalizes cl-data before exiting typical address_space operation. Dual to
 * ll_cl_init().
 */
static void ll_cl_fini(struct ll_cl_context *lcc)
{
	struct lu_env  *env  = lcc->lcc_env;
	struct cl_io   *io   = lcc->lcc_io;
	struct cl_page *page = lcc->lcc_page;

	LASSERT(lcc->lcc_cookie == current);
	LASSERT(env != NULL);

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "cl_io", io);
		cl_page_put(env, page);
	}

	cl_env_put(env, &lcc->lcc_refcheck);
}

/**
 * Initializes common cl-data at the typical address_space operation entry
 * point.
 */
static struct ll_cl_context *ll_cl_init(struct file *file,
					struct page *vmpage, int create)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_object *clob;
	struct ccc_io *cio;

	int refcheck;
	int result = 0;

	clob = ll_i2info(vmpage->mapping->host)->lli_clob;
	LASSERT(clob != NULL);

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return ERR_CAST(env);

	lcc = &vvp_env_info(env)->vti_io_ctx;
	memset(lcc, 0, sizeof(*lcc));
	lcc->lcc_env = env;
	lcc->lcc_refcheck = refcheck;
	lcc->lcc_cookie = current;

	cio = ccc_env_io(env);
	io = cio->cui_cl.cis_io;
	if (io == NULL && create) {
		struct inode *inode = vmpage->mapping->host;
		loff_t pos;

		if (inode_trylock(inode)) {
			inode_unlock(inode);

			/* this is too bad. Someone is trying to write the
			 * page w/o holding inode mutex. This means we can
			 * add dirty pages into cache during truncate */
			CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
			       current->comm);
			dump_stack();
			LBUG();
			return ERR_PTR(-EIO);
		}

		/*
		 * Loop-back driver calls ->prepare_write() methods directly,
		 * bypassing file system ->write() operation, so cl_io has to
		 * be created here.
		 */
		io = ccc_env_thread_io(env);
		ll_io_init(io, file, 1);

		/* No lock at all for this kind of IO - we can't do it because
		 * we have held page lock, it would cause deadlock.
		 * XXX: This causes poor performance to loop device - One page
		 * per RPC.
		 * In order to get better performance, users should use
		 * lloop driver instead.
		 */
		io->ci_lockreq = CILR_NEVER;

		pos = vmpage->index << PAGE_CACHE_SHIFT;

		/* Create a temp IO to serve write. */
		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
		if (result == 0) {
			cio->cui_fd = LUSTRE_FPRIVATE(file);
			cio->cui_iter = NULL;
			result = cl_io_iter_init(env, io);
			if (result == 0) {
				result = cl_io_lock(env, io);
				if (result == 0)
					result = cl_io_start(env, io);
			}
		} else
			result = io->ci_result;
	}

	lcc->lcc_io = io;
	if (io == NULL)
		result = -EIO;
	if (result == 0) {
		struct cl_page *page;

		LASSERT(io != NULL);
		LASSERT(io->ci_state == CIS_IO_GOING);
		LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
		page = cl_page_find(env, clob, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (!IS_ERR(page)) {
			lcc->lcc_page = page;
			lu_ref_add(&page->cp_reference, "cl_io", io);
			result = 0;
		} else
			result = PTR_ERR(page);
	}
	if (result) {
		ll_cl_fini(lcc);
		lcc = ERR_PTR(result);
	}

	CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
	       vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
	       env, io);
	return lcc;
}

static struct ll_cl_context *ll_cl_get(void)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));
	lcc = &vvp_env_info(env)->vti_io_ctx;
	LASSERT(env == lcc->lcc_env);
	LASSERT(current == lcc->lcc_cookie);
	cl_env_put(env, &refcheck);

	/* the env was obtained in ll_cl_init(), so it is still usable. */
	return lcc;
}

/**
 * ->prepare_write() address space operation called by generic_file_write()
 * for every page during write.
 */
int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
		     unsigned to)
{
	struct ll_cl_context *lcc;
	int result;

	lcc = ll_cl_init(file, vmpage, 1);
	if (!IS_ERR(lcc)) {
		struct lu_env *env = lcc->lcc_env;
		struct cl_io *io = lcc->lcc_io;
		struct cl_page *page = lcc->lcc_page;

		cl_page_assume(env, io, page);

		result = cl_io_prepare_write(env, io, page, from, to);
		if (result == 0) {
			/*
			 * Add a reference, so that page is not evicted from
			 * the cache until ->commit_write() is called.
			 */
			cl_page_get(page);
			lu_ref_add(&page->cp_reference, "prepare_write",
				   current);
		} else {
			cl_page_unassume(env, io, page);
			ll_cl_fini(lcc);
		}
		/* returning 0 in prepare assumes commit must be called
		 * afterwards */
	} else {
		result = PTR_ERR(lcc);
	}
	return result;
}

int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
		    unsigned to)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_page *page;
	int result = 0;

	lcc = ll_cl_get();
	env = lcc->lcc_env;
	page = lcc->lcc_page;
	io = lcc->lcc_io;

	LASSERT(cl_page_is_owned(page, io));
	LASSERT(from <= to);
	if (from != to) /* handle short write case. */
		result = cl_io_commit_write(env, io, page, from, to);
	if (cl_page_is_owned(page, io))
		cl_page_unassume(env, io, page);

	/*
	 * Release reference acquired by ll_prepare_write().
	 */
	lu_ref_del(&page->cp_reference, "prepare_write", current);
	cl_page_put(env, page);
	ll_cl_fini(lcc);
	return result;
}

static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);

/**
 * Get readahead pages from the filesystem readahead pool of the client for a
 * thread.
 *
 * \param sbi   superblock for filesystem readahead state ll_ra_info
 * \param ria   per-thread readahead state
 * \param pages number of pages requested for readahead for the thread.
 *
 * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
 * It should work well if the ra_max_pages is much greater than the single
 * file's read-ahead window, and there are not too many threads contending
 * for these readahead pages.
 *
 * TODO: There may be a 'global sync problem' if many threads are trying
 * to get an ra budget that is larger than the remaining readahead pages
 * and reach here at exactly the same time. They will compute \a ret to
 * consume the remaining pages, but will fail at atomic_add_return() and
 * get a zero ra window, although there is still ra space remaining. - Jay */

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
				     struct ra_io_arg *ria,
				     unsigned long pages)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;
	long ret;

	/* If the read-ahead pages left are less than 1M, do not do read-ahead,
	 * otherwise it will form small read RPCs (< 1M), which hurt server
	 * performance a lot. */
	ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
	if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
		ret = 0;
		goto out;
	}

	/* If the non-strided (ria_pages == 0) readahead window
	 * (ria_start + ret) has grown across an RPC boundary, then trim
	 * readahead size by the amount beyond the RPC so it ends on an
	 * RPC boundary. If the readahead window is already ending on
	 * an RPC boundary (beyond_rpc == 0), or fits entirely within one
	 * RPC (beyond_rpc >= ret), the readahead size is unchanged.
	 * The (beyond_rpc != 0) check is skipped since the conditional
	 * branch is more expensive than subtracting zero from the result.
	 *
	 * Strided read is left unaligned to avoid small fragments beyond
	 * the RPC boundary from needing an extra read RPC. */
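	/*
	 * Worked example for illustration (hypothetical values, not taken
	 * from this code): assuming PTLRPC_MAX_BRW_PAGES == 256 (a 1 MiB RPC
	 * with 4 KiB pages), ria_start == 100 and ret == 300 give
	 * beyond_rpc == (100 + 300) % 256 == 144, which is less than ret, so
	 * ret is trimmed to 300 - 144 == 156 and the window ends exactly on
	 * the RPC boundary at page 256.
	 */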
	if (ria->ria_pages == 0) {
		long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;

		if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
			ret -= beyond_rpc;
	}

	if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
		atomic_sub(ret, &ra->ra_cur_pages);
		ret = 0;
	}

out:
	return ret;
}

void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;

	atomic_sub(len, &ra->ra_cur_pages);
}

static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
{
	LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
	lprocfs_counter_incr(sbi->ll_ra_stats, which);
}

void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
	struct ll_sb_info *sbi = ll_i2sbi(mapping->host);

	ll_ra_stats_inc_sbi(sbi, which);
}

#define RAS_CDEBUG(ras) \
	CDEBUG(D_READA, \
	       "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu " \
	       "csr %lu sf %lu sp %lu sl %lu\n", \
	       ras->ras_last_readpage, ras->ras_consecutive_requests, \
	       ras->ras_consecutive_pages, ras->ras_window_start, \
	       ras->ras_window_len, ras->ras_next_readahead, \
	       ras->ras_requests, ras->ras_request_index, \
	       ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
	       ras->ras_stride_pages, ras->ras_stride_length)

static int index_in_window(unsigned long index, unsigned long point,
			   unsigned long before, unsigned long after)
{
	unsigned long start = point - before, end = point + after;

	if (start > point)
		start = 0;
	if (end < point)
		end = ~0;

	return start <= index && index <= end;
}

static struct ll_readahead_state *ll_ras_get(struct file *f)
{
	struct ll_file_data *fd;

	fd = LUSTRE_FPRIVATE(f);
	return &fd->fd_ras;
}

void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
	struct ll_readahead_state *ras;

	ras = ll_ras_get(f);

	spin_lock(&ras->ras_lock);
	ras->ras_requests++;
	ras->ras_request_index = 0;
	ras->ras_consecutive_requests++;
	rar->lrr_reader = current;

	list_add(&rar->lrr_linkage, &ras->ras_read_beads);
	spin_unlock(&ras->ras_lock);
}

void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
	struct ll_readahead_state *ras;

	ras = ll_ras_get(f);

	spin_lock(&ras->ras_lock);
	list_del_init(&rar->lrr_linkage);
	spin_unlock(&ras->ras_lock);
}

static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
			      struct cl_page_list *queue, struct cl_page *page,
			      struct page *vmpage)
{
	struct ccc_page *cp;
	int rc;

	rc = 0;
	cl_page_assume(env, io, page);
	lu_ref_add(&page->cp_reference, "ra", current);
	cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
	if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
		rc = cl_page_is_under_lock(env, io, page);
		if (rc == -EBUSY) {
			cp->cpg_defer_uptodate = 1;
			cp->cpg_ra_used = 0;
			cl_page_list_add(queue, page);
			rc = 1;
		} else {
			cl_page_delete(env, page);
			rc = -ENOLCK;
		}
	} else {
		/* skip completed pages */
		cl_page_unassume(env, io, page);
	}
	lu_ref_del(&page->cp_reference, "ra", current);
	cl_page_put(env, page);
	return rc;
}

/**
 * Initiates read-ahead of a page with given index.
 *
 * \retval +ve: page was added to \a queue.
 *
 * \retval -ENOLCK: there is no extent lock for this part of a file, stop
 *		    read-ahead.
 *
 * \retval -ve, 0: page wasn't added to \a queue for other reason.
 */
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
			      struct cl_page_list *queue,
			      pgoff_t index, struct address_space *mapping)
{
	struct page *vmpage;
	struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
	struct cl_page *page;
	enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
	int rc = 0;
	const char *msg = NULL;

	vmpage = grab_cache_page_nowait(mapping, index);
	if (vmpage != NULL) {
		/* Check if vmpage was truncated or reclaimed */
		if (vmpage->mapping == mapping) {
			page = cl_page_find(env, clob, vmpage->index,
					    vmpage, CPT_CACHEABLE);
			if (!IS_ERR(page)) {
				rc = cl_read_ahead_page(env, io, queue,
							page, vmpage);
				if (rc == -ENOLCK) {
					which = RA_STAT_FAILED_MATCH;
					msg = "lock match failed";
				}
			} else {
				which = RA_STAT_FAILED_GRAB_PAGE;
				msg = "cl_page_find failed";
			}
		} else {
			which = RA_STAT_WRONG_GRAB_PAGE;
			msg = "g_c_p_n returned invalid page";
		}
		if (rc != 1)
			unlock_page(vmpage);
		page_cache_release(vmpage);
	} else {
		which = RA_STAT_FAILED_GRAB_PAGE;
		msg = "g_c_p_n failed";
	}
	if (msg != NULL) {
		ll_ra_stats_inc(mapping, which);
		CDEBUG(D_READA, "%s\n", msg);
	}
	return rc;
}

#define RIA_DEBUG(ria) \
	CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
	       ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length, \
	       ria->ria_pages)

/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
 * know what the actual RPC size is. If this needs to change, it makes more
 * sense to tune the i_blkbits value for the file based on the OSTs it is
 * striped over, rather than having a constant value for all files here. */

/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
 * Temporarily set RAS_INCREASE_STEP to 1MB. After the 4MB RPC is enabled
 * by default, this should be adjusted to correspond with max_read_ahead_mb
 * and max_read_ahead_per_file_mb, otherwise the readahead budget can be used
 * up quickly, which will affect read performance significantly. See LU-2816 */
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
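
/*
 * Illustrative note (assumption, not taken from this code's headers): with
 * 4 KiB pages (PAGE_CACHE_SHIFT == 12) and ONE_MB_BRW_SIZE == 1 MiB, the
 * step above evaluates to 256 pages, i.e. the read-ahead window grows in
 * 1 MiB increments regardless of the inode's i_blkbits.
 */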

static inline int stride_io_mode(struct ll_readahead_state *ras)
{
	return ras->ras_consecutive_stride_requests > 1;
}

/* The function calculates how many pages will be read in
 * [off, off + length], in such stride IO area,
 * stride_offset = st_off, stride_length = st_len,
 * stride_pages = st_pgs
 *
 *   |------------------|*****|------------------|*****|------------|*****|....
 *    st_off
 *   |--- st_pgs     ---|
 *   |-----     st_len   -----|
 *
 *              How many pages it should read in such pattern
 *              |-------------------------------------------------------------|
 *              off
 *              |<------                  length                      ------->|
 *
 *          =   |<----->|  +  |-------------------------------------|  +  |---|
 *                 start_left                 st_pgs * i                end_left
 */
static unsigned long
stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
		unsigned long off, unsigned long length)
{
	__u64 start = off > st_off ? off - st_off : 0;
	__u64 end = off + length > st_off ? off + length - st_off : 0;
	unsigned long start_left = 0;
	unsigned long end_left = 0;
	unsigned long pg_count;

	if (st_len == 0 || length == 0 || end == 0)
		return length;

	start_left = do_div(start, st_len);
	if (start_left < st_pgs)
		start_left = st_pgs - start_left;
	else
		start_left = 0;

	end_left = do_div(end, st_len);
	if (end_left > st_pgs)
		end_left = st_pgs;

	CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
	       start, end, start_left, end_left);

	if (start == end)
		pg_count = end_left - (st_pgs - start_left);
	else
		pg_count = start_left + st_pgs * (end - start - 1) + end_left;

	CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
	       st_off, st_len, st_pgs, off, length, pg_count);

	return pg_count;
}
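
/*
 * Worked example for illustration (hypothetical values, not taken from this
 * code): with st_off = 0, st_len = 16, st_pgs = 4, off = 2 and length = 40,
 * the stride pattern reads pages 0-3, 16-19, 32-35, ... For the range
 * [2, 41] this is pages 2-3, 16-19 and 32-35; the code above computes
 * start_left = 2, end_left = 4, start = 0, end = 2, and therefore
 * pg_count = 2 + 4 * 1 + 4 = 10.
 */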

static int ria_page_count(struct ra_io_arg *ria)
{
	__u64 length = ria->ria_end >= ria->ria_start ?
		       ria->ria_end - ria->ria_start + 1 : 0;

	return stride_pg_count(ria->ria_stoff, ria->ria_length,
			       ria->ria_pages, ria->ria_start,
			       length);
}

/* Check whether the index is in the defined ra-window */
static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
{
	/* If ria_length == ria_pages, it means non-stride I/O mode,
	 * and idx should always be inside the read-ahead window in this case.
	 * For stride I/O mode, just check whether the idx is inside
	 * the ria_pages. */
	return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
	       (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
		ria->ria_length < ria->ria_pages);
}
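
/*
 * Worked example for illustration (hypothetical values, not taken from this
 * code): with ria_stoff = 0, ria_length = 16 and ria_pages = 4, index 18 is
 * inside the window ((18 - 0) % 16 == 2 < 4), while index 22 is not
 * ((22 - 0) % 16 == 6 >= 4).
 */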

static int ll_read_ahead_pages(const struct lu_env *env,
			       struct cl_io *io, struct cl_page_list *queue,
			       struct ra_io_arg *ria,
			       unsigned long *reserved_pages,
			       struct address_space *mapping,
			       unsigned long *ra_end)
{
	int rc, count = 0, stride_ria;
	unsigned long page_idx;

	LASSERT(ria != NULL);
	RIA_DEBUG(ria);

	stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
	for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
	     *reserved_pages > 0; page_idx++) {
		if (ras_inside_ra_window(page_idx, ria)) {
			/* If the page is inside the read-ahead window */
			rc = ll_read_ahead_page(env, io, queue,
						page_idx, mapping);
			if (rc == 1) {
				(*reserved_pages)--;
				count++;
			} else if (rc == -ENOLCK)
				break;
		} else if (stride_ria) {
			/* If it is not in the read-ahead window, and we are
			 * in stride read-ahead mode, check whether it should
			 * skip the stride gap */
			pgoff_t offset;
			/* FIXME: This assertion is only valid for forward
			 * read-ahead, it will be fixed when backward
			 * read-ahead is implemented */
			LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
				 page_idx,
				 ria->ria_start, ria->ria_end, ria->ria_stoff,
				 ria->ria_length, ria->ria_pages);
			offset = page_idx - ria->ria_stoff;
			offset = offset % (ria->ria_length);
			if (offset > ria->ria_pages) {
				page_idx += ria->ria_length - offset;
				CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
				       ria->ria_length - offset);
				continue;
			}
		}
	}
	*ra_end = page_idx;
	return count;
}

int ll_readahead(const struct lu_env *env, struct cl_io *io,
		 struct ll_readahead_state *ras, struct address_space *mapping,
		 struct cl_page_list *queue, int flags)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct vvp_thread_info *vti = vvp_env_info(env);
	struct cl_attr *attr = ccc_env_thread_attr(env);
	unsigned long start = 0, end = 0, reserved;
	unsigned long ra_end, len;
	struct inode *inode;
	struct ll_ra_read *bead;
	struct ra_io_arg *ria = &vti->vti_ria;
	struct ll_inode_info *lli;
	struct cl_object *clob;
	int ret = 0;
	__u64 kms;

	inode = mapping->host;
	lli = ll_i2info(inode);
	clob = lli->lli_clob;

	memset(ria, 0, sizeof(*ria));

	cl_object_attr_lock(clob);
	ret = cl_object_attr_get(env, clob, attr);
	cl_object_attr_unlock(clob);

	if (ret != 0)
		return ret;
	kms = attr->cat_kms;
	if (kms == 0) {
		ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
		return 0;
	}

	spin_lock(&ras->ras_lock);
	if (vio->cui_ra_window_set)
		bead = &vio->cui_bead;
	else
		bead = NULL;

	/* Enlarge the RA window to encompass the full read */
	if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
	    bead->lrr_start + bead->lrr_count) {
		ras->ras_window_len = bead->lrr_start + bead->lrr_count -
				      ras->ras_window_start;
	}
	/* Reserve a part of the read-ahead window that we'll be issuing */
	if (ras->ras_window_len) {
		start = ras->ras_next_readahead;
		end = ras->ras_window_start + ras->ras_window_len - 1;
	}
	if (end != 0) {
		unsigned long rpc_boundary;
		/*
		 * Align RA window to an optimal boundary.
		 *
		 * XXX This would be better to align to cl_max_pages_per_rpc
		 * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
		 * be aligned to the RAID stripe size in the future and that
		 * is more important than the RPC size.
		 */
		/* Note: we only trim the RPC, instead of extending the RPC
		 * to the boundary, so as to avoid reading too many pages
		 * during random reads. */
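		/*
		 * Worked example for illustration (hypothetical values, not
		 * taken from this code): with PTLRPC_MAX_BRW_PAGES == 256,
		 * start == 100 and end == 300, rpc_boundary becomes
		 * ((300 + 1) & ~255) - 1 == 255, which is larger than start,
		 * so the window is trimmed to end at page 255, the last page
		 * of the first RPC-sized chunk.
		 */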
		rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
		if (rpc_boundary > 0)
			rpc_boundary--;

		if (rpc_boundary > start)
			end = rpc_boundary;

		/* Truncate RA window to end of file */
		end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));

		ras->ras_next_readahead = max(end, end + 1);
		RAS_CDEBUG(ras);
	}
	ria->ria_start = start;
	ria->ria_end = end;
	/* If stride I/O mode is detected, get stride window */
	if (stride_io_mode(ras)) {
		ria->ria_stoff = ras->ras_stride_offset;
		ria->ria_length = ras->ras_stride_length;
		ria->ria_pages = ras->ras_stride_pages;
	}
	spin_unlock(&ras->ras_lock);

	if (end == 0) {
		ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
		return 0;
	}
	len = ria_page_count(ria);
	if (len == 0)
		return 0;

	reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
	if (reserved < len)
		ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

	CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
	       atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
	       ll_i2sbi(inode)->ll_ra_info.ra_max_pages);

	ret = ll_read_ahead_pages(env, io, queue,
				  ria, &reserved, mapping, &ra_end);

	LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
	if (reserved != 0)
		ll_ra_count_put(ll_i2sbi(inode), reserved);

	if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
		ll_ra_stats_inc(mapping, RA_STAT_EOF);

	/* if we didn't get to the end of the region we reserved from
	 * the ras we need to go back and update the ras so that the
	 * next read-ahead tries from where we left off. we only do so
	 * if the region we failed to issue read-ahead on is still ahead
	 * of the app and behind the next index to start read-ahead from */
	CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
	       ra_end, end, ria->ria_end);

	if (ra_end != end + 1) {
		spin_lock(&ras->ras_lock);
		if (ra_end < ras->ras_next_readahead &&
		    index_in_window(ra_end, ras->ras_window_start, 0,
				    ras->ras_window_len)) {
			ras->ras_next_readahead = ra_end;
			RAS_CDEBUG(ras);
		}
		spin_unlock(&ras->ras_lock);
	}

	return ret;
}

static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
			  unsigned long index)
{
	ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
}
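
/*
 * Illustrative note (hypothetical values, not taken from this code): if
 * RAS_INCREASE_STEP(inode) is 256 pages, an index of 1000 rounds the window
 * start down to 1000 & ~255 == 768.
 */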

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
		      unsigned long index)
{
	ras->ras_last_readpage = index;
	ras->ras_consecutive_requests = 0;
	ras->ras_consecutive_pages = 0;
	ras->ras_window_len = 0;
	ras_set_start(inode, ras, index);
	ras->ras_next_readahead = max(ras->ras_window_start, index);

	RAS_CDEBUG(ras);
}

/* called with the ras_lock held or from places where it doesn't matter */
static void ras_stride_reset(struct ll_readahead_state *ras)
{
	ras->ras_consecutive_stride_requests = 0;
	ras->ras_stride_length = 0;
	ras->ras_stride_pages = 0;
	RAS_CDEBUG(ras);
}

void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
	spin_lock_init(&ras->ras_lock);
	ras_reset(inode, ras, 0);
	ras->ras_requests = 0;
	INIT_LIST_HEAD(&ras->ras_read_beads);
}

/*
 * Check whether the read request is in the stride window.
 * If it is in the stride window, return 1, otherwise return 0.
 */
static int index_in_stride_window(struct ll_readahead_state *ras,
				  unsigned long index)
{
	unsigned long stride_gap;

	if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
	    ras->ras_stride_pages == ras->ras_stride_length)
		return 0;

	stride_gap = index - ras->ras_last_readpage - 1;

	/* If it is contiguous read */
	if (stride_gap == 0)
		return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;

	/* Otherwise check the stride by itself */
	return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
	       ras->ras_consecutive_pages == ras->ras_stride_pages;
}
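
/*
 * Worked example for illustration (hypothetical values, not taken from this
 * code): with ras_stride_length == 16, ras_stride_pages == 4 and the previous
 * chunk fully read (ras_consecutive_pages == 4, ras_last_readpage == 3), a
 * read of page 16 gives stride_gap == 12 == stride_length - stride_pages and
 * stays inside the stride window, while a read of page 8 (stride_gap == 4)
 * does not.
 */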

static void ras_update_stride_detector(struct ll_readahead_state *ras,
				       unsigned long index)
{
	unsigned long stride_gap = index - ras->ras_last_readpage - 1;

	if (!stride_io_mode(ras) && (stride_gap != 0 ||
	    ras->ras_consecutive_stride_requests == 0)) {
		ras->ras_stride_pages = ras->ras_consecutive_pages;
		ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
	}
	LASSERT(ras->ras_request_index == 0);
	LASSERT(ras->ras_consecutive_stride_requests == 0);

	if (index <= ras->ras_last_readpage) {
		/* Reset stride window for forward read */
		ras_stride_reset(ras);
		return;
	}

	ras->ras_stride_pages = ras->ras_consecutive_pages;
	ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;

	RAS_CDEBUG(ras);
	return;
}

/* Stride Read-ahead window will be increased inc_len according to
 * stride I/O pattern */
static void ras_stride_increase_window(struct ll_readahead_state *ras,
				       struct ll_ra_info *ra,
				       unsigned long inc_len)
{
	unsigned long left, step, window_len;
	unsigned long stride_len;

	LASSERT(ras->ras_stride_length > 0);
	LASSERTF(ras->ras_window_start + ras->ras_window_len
		 >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
		 ras->ras_window_start,
		 ras->ras_window_len, ras->ras_stride_offset);

	stride_len = ras->ras_window_start + ras->ras_window_len -
		     ras->ras_stride_offset;

	left = stride_len % ras->ras_stride_length;
	window_len = ras->ras_window_len - left;

	if (left < ras->ras_stride_pages)
		left += inc_len;
	else
		left = ras->ras_stride_pages + inc_len;

	LASSERT(ras->ras_stride_pages != 0);

	step = left / ras->ras_stride_pages;
	left %= ras->ras_stride_pages;

	window_len += step * ras->ras_stride_length + left;

	if (stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
			    ras->ras_stride_pages, ras->ras_stride_offset,
			    window_len) <= ra->ra_max_pages_per_file)
		ras->ras_window_len = window_len;

	RAS_CDEBUG(ras);
}

static void ras_increase_window(struct inode *inode,
				struct ll_readahead_state *ras,
				struct ll_ra_info *ra)
{
	/* The stretch of the ra-window should be aligned with the max
	 * rpc_size, but the current clio architecture does not support
	 * retrieving such information from the lower layer. FIXME later
	 */
	if (stride_io_mode(ras))
		ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
	else
		ras->ras_window_len = min(ras->ras_window_len +
					  RAS_INCREASE_STEP(inode),
					  ra->ra_max_pages_per_file);
}

void ras_update(struct ll_sb_info *sbi, struct inode *inode,
		struct ll_readahead_state *ras, unsigned long index,
		unsigned hit)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;
	int zero = 0, stride_detect = 0, ra_miss = 0;

	spin_lock(&ras->ras_lock);

	ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);

	/* reset the read-ahead window in two cases. First when the app seeks
	 * or reads to some other part of the file. Secondly if we get a
	 * read-ahead miss that we think we've previously issued. This can
	 * be a symptom of there being so many read-ahead pages that the VM is
	 * reclaiming it before we get to it. */
	if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
		zero = 1;
		ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
	} else if (!hit && ras->ras_window_len &&
		   index < ras->ras_next_readahead &&
		   index_in_window(index, ras->ras_window_start, 0,
				   ras->ras_window_len)) {
		ra_miss = 1;
		ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
	}

	/* On the second access to a file smaller than the tunable
	 * ra_max_read_ahead_whole_pages trigger RA on all pages in the
	 * file up to ra_max_pages_per_file. This is simply a best effort
	 * and only occurs once per open file. Normal RA behavior is reverted
	 * to for subsequent IO. The mmap case does not increment
	 * ras_requests and thus can never trigger this behavior. */
	if (ras->ras_requests == 2 && !ras->ras_request_index) {
		__u64 kms_pages;

		kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
			    PAGE_CACHE_SHIFT;

		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
		       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);

		if (kms_pages &&
		    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
			ras->ras_window_start = 0;
			ras->ras_last_readpage = 0;
			ras->ras_next_readahead = 0;
			ras->ras_window_len = min(ra->ra_max_pages_per_file,
						  ra->ra_max_read_ahead_whole_pages);
			goto out_unlock;
		}
	}
	if (zero) {
		/* check whether it is in stride I/O mode */
		if (!index_in_stride_window(ras, index)) {
			if (ras->ras_consecutive_stride_requests == 0 &&
			    ras->ras_request_index == 0) {
				ras_update_stride_detector(ras, index);
				ras->ras_consecutive_stride_requests++;
			} else {
				ras_stride_reset(ras);
			}
			ras_reset(inode, ras, index);
			ras->ras_consecutive_pages++;
			goto out_unlock;
		} else {
			ras->ras_consecutive_pages = 0;
			ras->ras_consecutive_requests = 0;
			if (++ras->ras_consecutive_stride_requests > 1)
				stride_detect = 1;
			RAS_CDEBUG(ras);
		}
	} else {
		if (ra_miss) {
			if (index_in_stride_window(ras, index) &&
			    stride_io_mode(ras)) {
				/* If stride-RA hit a cache miss, the stride
				 * detector will not be reset, to avoid the
				 * overhead of re-detecting read-ahead mode */
				if (index != ras->ras_last_readpage + 1)
					ras->ras_consecutive_pages = 0;
				ras_reset(inode, ras, index);
				RAS_CDEBUG(ras);
			} else {
				/* Reset both stride window and normal RA
				 * window */
				ras_reset(inode, ras, index);
				ras->ras_consecutive_pages++;
				ras_stride_reset(ras);
				goto out_unlock;
			}
		} else if (stride_io_mode(ras)) {
			/* If this is a contiguous read but we are currently
			 * in stride I/O mode, check whether the stride step
			 * is still valid; if invalid, reset the stride
			 * read-ahead window */
			if (!index_in_stride_window(ras, index)) {
				/* Shrink stride read-ahead window to be zero */
				ras_stride_reset(ras);
				ras->ras_window_len = 0;
				ras->ras_next_readahead = index;
			}
		}
	}
	ras->ras_consecutive_pages++;
	ras->ras_last_readpage = index;
	ras_set_start(inode, ras, index);

	if (stride_io_mode(ras))
		/* Stride readahead is sensitive to the read-ahead offset,
		 * so we use the original offset here, instead of
		 * ras_window_start, which is RPC aligned */
		ras->ras_next_readahead = max(index, ras->ras_next_readahead);
	else
		ras->ras_next_readahead = max(ras->ras_window_start,
					      ras->ras_next_readahead);
	RAS_CDEBUG(ras);

	/* Trigger RA in the mmap case where ras_consecutive_requests
	 * is not incremented and thus can't be used to trigger RA */
	if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
		ras->ras_window_len = RAS_INCREASE_STEP(inode);
		goto out_unlock;
	}

	/* Initially reset the stride window offset to next_readahead */
	if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
		/**
		 * Once stride IO mode is detected, next_readahead should be
		 * reset to make sure next_readahead > stride offset
		 */
		ras->ras_next_readahead = max(index, ras->ras_next_readahead);
		ras->ras_stride_offset = index;
		ras->ras_window_len = RAS_INCREASE_STEP(inode);
	}

	/* The initial ras_window_len is set to the request size. To avoid
	 * uselessly reading and discarding pages for random IO the window is
	 * only increased once per consecutive request received. */
	if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
	    !ras->ras_request_index)
		ras_increase_window(inode, ras, ra);
out_unlock:
	RAS_CDEBUG(ras);
	ras->ras_request_index++;
	spin_unlock(&ras->ras_lock);
	return;
}

int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
	struct inode *inode = vmpage->mapping->host;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lu_env *env;
	struct cl_io *io;
	struct cl_page *page;
	struct cl_object *clob;
	struct cl_env_nest nest;
	bool redirtied = false;
	bool unlocked = false;
	int result;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageWriteback(vmpage));

	LASSERT(ll_i2dtexp(inode) != NULL);

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env)) {
		result = PTR_ERR(env);
		goto out;
	}

	clob = ll_i2info(inode)->lli_clob;
	LASSERT(clob != NULL);

	io = ccc_env_thread_io(env);
	io->ci_obj = clob;
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, clob);
	if (result == 0) {
		page = cl_page_find(env, clob, vmpage->index,
				    vmpage, CPT_CACHEABLE);
		if (!IS_ERR(page)) {
			lu_ref_add(&page->cp_reference, "writepage",
				   current);
			cl_page_assume(env, io, page);
			result = cl_page_flush(env, io, page);
			if (result != 0) {
				/*
				 * Re-dirty page on error so it retries write,
				 * but not in case when IO has actually
				 * occurred and completed with an error.
				 */
				if (!PageError(vmpage)) {
					redirty_page_for_writepage(wbc, vmpage);
					result = 0;
					redirtied = true;
				}
			}
			cl_page_disown(env, io, page);
			unlocked = true;
			lu_ref_del(&page->cp_reference,
				   "writepage", current);
			cl_page_put(env, page);
		} else {
			result = PTR_ERR(page);
		}
	}
	cl_io_fini(env, io);

	if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
		loff_t offset = cl_offset(clob, vmpage->index);

		/* Flush page failed because the extent is being written out.
		 * Wait for the write of extent to be finished to avoid
		 * breaking kernel which assumes ->writepage should mark
		 * PageWriteback or clean the page. */
		result = cl_sync_file_range(inode, offset,
					    offset + PAGE_CACHE_SIZE - 1,
					    CL_FSYNC_LOCAL, 1);
		if (result > 0) {
			/* We may actually have written more than one page;
			 * decrement nr_to_write by the extra pages, since the
			 * caller will account for this page itself. */
			wbc->nr_to_write -= result - 1;
			result = 0;
		}
	}

	cl_env_nested_put(&nest, env);
	goto out;

out:
	if (result < 0) {
		if (!lli->lli_async_rc)
			lli->lli_async_rc = result;
		SetPageError(vmpage);
		if (!unlocked)
			unlock_page(vmpage);
	}
	return result;
}

int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	loff_t start;
	loff_t end;
	enum cl_fsync_mode mode;
	int range_whole = 0;
	int result;
	int ignore_layout = 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index << PAGE_CACHE_SHIFT;
		end = OBD_OBJECT_EOF;
	} else {
		start = wbc->range_start;
		end = wbc->range_end;
		if (end == LLONG_MAX) {
			end = OBD_OBJECT_EOF;
			range_whole = start == 0;
		}
	}

	mode = CL_FSYNC_NONE;
	if (wbc->sync_mode == WB_SYNC_ALL)
		mode = CL_FSYNC_LOCAL;

	if (sbi->ll_umounting)
		/* if the mountpoint is being umounted, all pages have to be
		 * evicted to avoid hitting LBUG when truncate_inode_pages()
		 * is called later on. */
		ignore_layout = 1;
	result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
	if (result > 0) {
		wbc->nr_to_write -= result;
		result = 0;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
		if (end == OBD_OBJECT_EOF)
			end = i_size_read(inode);
		mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
	}
	return result;
}

int ll_readpage(struct file *file, struct page *vmpage)
{
	struct ll_cl_context *lcc;
	int result;

	lcc = ll_cl_init(file, vmpage, 0);
	if (!IS_ERR(lcc)) {
		struct lu_env *env = lcc->lcc_env;
		struct cl_io *io = lcc->lcc_io;
		struct cl_page *page = lcc->lcc_page;

		LASSERT(page->cp_type == CPT_CACHEABLE);
		if (likely(!PageUptodate(vmpage))) {
			cl_page_assume(env, io, page);
			result = cl_io_read_page(env, io, page);
		} else {
			/* Page from a non-object file. */
			unlock_page(vmpage);
			result = 0;
		}
		ll_cl_fini(lcc);
	} else {
		unlock_page(vmpage);
		result = PTR_ERR(lcc);
	}
	return result;
}