/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg);

/** \addtogroup osc
 * @{
 */
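
/*
 * Lock-coverage checking is stubbed out here: the function unconditionally
 * reports the page as protected, so the LINVRNT() assertions built on it
 * always pass. The parameters are kept for the callers' benefit.
 */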
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	return 1;
}

/*****************************************************************************
 *
 * Page operations.
 *
 */
static void osc_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);

	CDEBUG(D_TRACE, "%p\n", opg);
	LASSERT(!opg->ops_lock);
}
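
/*
 * Pin a page for a pending transfer: take a cl_page reference and record
 * the pin in ops_transfer_pinned so that osc_page_transfer_put() drops
 * exactly one reference when the transfer finishes or fails to queue.
 */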
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	LASSERT(!opg->ops_transfer_pinned);
	cl_page_get(page);
	lu_ref_add_atomic(&page->cp_reference, label, page);
	opg->ops_transfer_pinned = 1;
}

static void osc_page_transfer_put(const struct lu_env *env,
				  struct osc_page *opg)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	if (opg->ops_transfer_pinned) {
		lu_ref_del(&page->cp_reference, "transfer", page);
		opg->ops_transfer_pinned = 0;
		cl_page_put(env, page);
	}
}

/**
 * This is called once for every page when it is submitted for a transfer,
 * either opportunistic (osc_page_cache_add()) or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
				  struct osc_page *opg, enum cl_req_type crt)
{
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	/* ops_lru and ops_inflight share the same field, so take the page
	 * off the LRU first and then use the field as inflight. */
	osc_lru_del(osc_cli(obj), opg, false);

	spin_lock(&obj->oo_seatbelt);
	list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
	opg->ops_submitter = current;
	spin_unlock(&obj->oo_seatbelt);
}

static int osc_page_cache_add(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *io)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_page *opg = cl2osc_page(slice);
	int result;

	LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

	osc_page_transfer_get(opg, "transfer\0cache");
	result = osc_queue_async_io(env, io, opg);
	if (result != 0)
		osc_page_transfer_put(env, opg);
	else
		osc_page_transfer_add(env, opg, CRT_WRITE);

	/* for a sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it is known there are no further pages. */
	if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
		if (oio->oi_active) {
			osc_extent_release(env, oio->oi_active);
			oio->oi_active = NULL;
		}
	}

	return result;
}
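
/*
 * Convert a [start, end] page-index range on @obj into the byte-granular
 * LDLM extent policy covering it.
 */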
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
		      pgoff_t start, pgoff_t end)
{
	memset(policy, 0, sizeof(*policy));
	policy->l_extent.start = cl_offset(obj, start);
	policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
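
/*
 * Pin the osc_lock behind @lock on behalf of this page. A non-positive
 * ols_pageref means the lock can no longer protect pages, in which case
 * -ENODATA is returned; otherwise a lock reference is taken and cached in
 * opg->ops_lock until osc_page_putref_lock().
 */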
static int osc_page_addref_lock(const struct lu_env *env,
				struct osc_page *opg,
				struct cl_lock *lock)
{
	struct osc_lock *olock;
	int rc;

	LASSERT(!opg->ops_lock);

	olock = osc_lock_at(lock);
	if (atomic_inc_return(&olock->ols_pageref) <= 0) {
		atomic_dec(&olock->ols_pageref);
		rc = -ENODATA;
	} else {
		cl_lock_get(lock);
		opg->ops_lock = lock;
		rc = 0;
	}
	return rc;
}

static void osc_page_putref_lock(const struct lu_env *env,
				 struct osc_page *opg)
{
	struct cl_lock *lock = opg->ops_lock;
	struct osc_lock *olock;

	LASSERT(lock);
	olock = osc_lock_at(lock);

	atomic_dec(&olock->ols_pageref);
	opg->ops_lock = NULL;

	cl_lock_put(env, lock);
}
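
/*
 * Check whether the page is covered by a DLM lock: returns -EBUSY when a
 * covering lock is found and pinned, -ENODATA otherwise. The reference
 * taken via osc_page_addref_lock() stays in opg->ops_lock and is dropped
 * in osc_page_disown() or on read completion.
 */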
static int osc_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused)
{
	struct cl_lock *lock;
	int result = -ENODATA;

	lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
			       NULL, 1, 0);
	if (lock) {
		if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
			result = -EBUSY;
		cl_lock_put(env, lock);
	}
	return result;
}

static void osc_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);

	if (unlikely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
}
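
/*
 * Transfer completion callbacks: drop the lock reference cached by
 * osc_page_is_under_lock() (reads only) and return the page to the
 * client's LRU list.
 */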
static void osc_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	if (likely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
	osc_lru_add(osc_cli(obj), opg);
}

static void osc_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(slice->cpl_obj);

	osc_lru_add(osc_cli(obj), opg);
}

static int osc_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 struct cl_io *unused)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

static const char *osc_list(struct list_head *head)
{
	return list_empty(head) ? "-" : "+";
}

static inline unsigned long osc_submit_duration(struct osc_page *opg)
{
	if (opg->ops_submit_time == 0)
		return 0;

	return (cfs_time_current() - opg->ops_submit_time);
}

static int osc_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object *obj = cl2osc(slice->cpl_obj);
	struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;

	return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
			  opg,
			  /* 1 */
			  oap->oap_magic, oap->oap_cmd,
			  oap->oap_interrupted,
			  osc_list(&oap->oap_pending_item),
			  osc_list(&oap->oap_rpc_item),
			  /* 2 */
			  oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
			  oap->oap_async_flags, oap->oap_brw_flags,
			  oap->oap_request, oap->oap_cli, obj,
			  /* 3 */
			  osc_list(&opg->ops_inflight),
			  opg->ops_submitter, opg->ops_transfer_pinned,
			  osc_submit_duration(opg), opg->ops_srvlock,
			  /* 4 */
			  cli->cl_r_in_flight, cli->cl_w_in_flight,
			  cli->cl_max_rpcs_in_flight,
			  cli->cl_avail_grant,
			  osc_list(&cli->cl_cache_waiters),
			  osc_list(&cli->cl_loi_ready_list),
			  osc_list(&cli->cl_loi_hp_ready_list),
			  osc_list(&cli->cl_loi_write_list),
			  osc_list(&cli->cl_loi_read_list),
			  /* 5 */
			  osc_list(&obj->oo_ready_item),
			  osc_list(&obj->oo_hp_ready_item),
			  osc_list(&obj->oo_write_item),
			  osc_list(&obj->oo_read_item),
			  atomic_read(&obj->oo_nr_reads),
			  osc_list(&obj->oo_reading_exts),
			  atomic_read(&obj->oo_nr_writes),
			  osc_list(&obj->oo_hp_exts),
			  osc_list(&obj->oo_urgent_exts));
}
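
/*
 * Final removal of a page from this layer: tear down the async page,
 * unlink it from the in-flight list and release its LRU slot.
 */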
static void osc_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	int rc;

	LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

	CDEBUG(D_TRACE, "%p\n", opg);
	osc_page_transfer_put(env, opg);
	rc = osc_teardown_async_page(env, obj, opg);
	if (rc) {
		CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
			      "Trying to teardown failed: %d\n", rc);
		LASSERT(0);
	}

	spin_lock(&obj->oo_seatbelt);
	if (opg->ops_submitter) {
		LASSERT(!list_empty(&opg->ops_inflight));
		list_del_init(&opg->ops_inflight);
		opg->ops_submitter = NULL;
	}
	spin_unlock(&obj->oo_seatbelt);

	osc_lru_del(osc_cli(obj), opg, true);
}

static void osc_page_clip(const struct lu_env *env,
			  const struct cl_page_slice *slice, int from, int to)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	opg->ops_from = from;
	opg->ops_to = to;
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags |= ASYNC_COUNT_STABLE;
	spin_unlock(&oap->oap_lock);
}

static int osc_page_cancel(const struct lu_env *env,
			   const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc = 0;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	/* Check if the transfer of this page has completed, or was never
	 * even queued. */
	if (opg->ops_transfer_pinned)
		/* FIXME: may not be interrupted.. */
		rc = osc_cancel_async_page(env, opg);
	LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
	return rc;
}

static int osc_page_flush(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc;

	rc = osc_flush_async_page(env, io, opg);
	return rc;
}

static const struct cl_page_operations osc_page_ops = {
	.cpo_fini = osc_page_fini,
	.cpo_print = osc_page_print,
	.cpo_delete = osc_page_delete,
	.cpo_is_under_lock = osc_page_is_under_lock,
	.cpo_disown = osc_page_disown,
	.io = {
		[CRT_READ] = {
			.cpo_cache_add = osc_page_fail,
			.cpo_completion = osc_page_completion_read
		},
		[CRT_WRITE] = {
			.cpo_cache_add = osc_page_cache_add,
			.cpo_completion = osc_page_completion_write
		}
	},
	.cpo_clip = osc_page_clip,
	.cpo_cancel = osc_page_cancel,
	.cpo_flush = osc_page_flush
};
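
/*
 * Instantiate the OSC slice of a cl_page: set up the async page, attach
 * the slice operations and, for cacheable pages, reserve an LRU slot.
 */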
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct osc_object *osc = cl2osc(obj);
	struct osc_page *opg = cl_object_page_slice(obj, page);
	int result;

	opg->ops_from = 0;
	opg->ops_to = PAGE_CACHE_SIZE;

	result = osc_prep_async_page(osc, opg, vmpage,
				     cl_offset(obj, page->cp_index));
	if (result == 0) {
		struct osc_io *oio = osc_env_io(env);

		opg->ops_srvlock = osc_io_srvlock(oio);
		cl_page_slice_add(page, &opg->ops_cl, obj,
				  &osc_page_ops);
	}
	/*
	 * Cannot assert osc_page_protected() here as read-ahead
	 * creates temporary pages outside of a lock.
	 */
	/* ops_inflight and ops_lru are the same field, but it doesn't
	 * hurt to initialize it twice :-) */
	INIT_LIST_HEAD(&opg->ops_inflight);
	INIT_LIST_HEAD(&opg->ops_lru);

	/* reserve an LRU slot for this page */
	if (page->cp_type == CPT_CACHEABLE && result == 0)
		result = osc_lru_reserve(env, osc, opg);

	return result;
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags)
{
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object *obj = oap->oap_obj;

	LINVRNT(osc_page_protected(env, opg,
				   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

	LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
		 oap, oap->oap_magic);
	LASSERT(oap->oap_async_flags & ASYNC_READY);
	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

	oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	oap->oap_page_off = opg->ops_from;
	oap->oap_count = opg->ops_to - opg->ops_from;
	oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;

	if (!client_is_remote(osc_export(obj)) &&
	    capable(CFS_CAP_SYS_RESOURCE)) {
		oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
		oap->oap_cmd |= OBD_BRW_NOQUOTA;
	}

	opg->ops_submit_time = cfs_time_current();
	osc_page_transfer_get(opg, "transfer\0imm");
	osc_page_transfer_add(env, opg, crt);
}

/* --------------- LRU page management ------------------ */

/* OSC is a natural place to manage LRU pages, as applications tend to write
 * one OSC at a time. Ideally, an OSC that is used more frequently should
 * occupy more LRU slots. On the other hand, we should avoid using up all LRU
 * slots (client_obd::cl_lru_left), because then a process would have to be
 * put to sleep waiting for free slots - that would be very bad, so the
 * algorithm requires each OSC to free slots voluntarily to maintain a
 * reasonable number of free slots at all times.
 */

static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU budget ... */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
/* ... and free at most this number, otherwise shrinking takes too long. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */

/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way slots are freed at a steady
 * pace, which maintains fairness among OSCs.
 *
 * Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int pages = atomic_read(&cli->cl_lru_in_list) >> 1;

	if (atomic_read(&osc_lru_waiters) > 0 &&
	    atomic_read(cli->cl_lru_left) < lru_shrink_max)
		/* drop LRU pages aggressively */
		return min(pages, lru_shrink_max);

	/* if we are about to run out of LRU slots, free some, but not too
	 * many, to maintain fairness among OSCs. */
	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
		unsigned long tmp;

		tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
		if (pages > tmp)
			return min(pages, lru_shrink_max);

		return pages > lru_shrink_min ? lru_shrink_min : 0;
	}

	return 0;
}

/* Return how many pages in @pvec were not discarded. */
static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
			   struct cl_page **pvec, int max_index)
{
	int count;
	int i;

	for (count = 0, i = 0; i < max_index; i++) {
		struct cl_page *page = pvec[i];

		if (cl_page_own_try(env, io, page) == 0) {
			/* free the LRU page only if nobody is using it.
			 * This check is necessary to avoid freeing pages
			 * that have already been removed from the LRU and
			 * pinned for IO. */
			if (!cl_page_in_use(page)) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				++count;
			}
			cl_page_disown(env, io, page);
		}
		cl_page_put(env, page);
		pvec[i] = NULL;
	}
	return max_index - count;
}

/**
 * Drop at most @target pages from the LRU.
 */
int osc_lru_shrink(struct client_obd *cli, int target)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_object *clobj = NULL;
	struct cl_page **pvec;
	struct osc_page *opg;
	int maxscan = 0;
	int count = 0;
	int index = 0;
	int rc = 0;

	LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
	if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
		return 0;

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env))
		return PTR_ERR(env);

	pvec = osc_env_info(env)->oti_pvec;
	io = &osc_env_info(env)->oti_io;

	client_obd_list_lock(&cli->cl_lru_list_lock);
	atomic_inc(&cli->cl_lru_shrinkers);
	maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
	while (!list_empty(&cli->cl_lru_list)) {
		struct cl_page *page;

		if (--maxscan < 0)
			break;

		opg = list_entry(cli->cl_lru_list.next, struct osc_page,
				 ops_lru);
		page = cl_page_top(opg->ops_cl.cpl_page);
		if (cl_page_in_use_noref(page)) {
			list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
			continue;
		}

		LASSERT(page->cp_obj);
		if (clobj != page->cp_obj) {
			struct cl_object *tmp = page->cp_obj;

			cl_object_get(tmp);
			client_obd_list_unlock(&cli->cl_lru_list_lock);

			if (clobj) {
				count -= discard_pagevec(env, io, pvec, index);
				index = 0;

				cl_io_fini(env, io);
				cl_object_put(env, clobj);
				clobj = NULL;
			}

			clobj = tmp;
			io->ci_obj = clobj;
			io->ci_ignore_layout = 1;
			rc = cl_io_init(env, io, CIT_MISC, clobj);

			client_obd_list_lock(&cli->cl_lru_list_lock);

			if (rc != 0)
				break;

			++maxscan;
			continue;
		}

		/* move this page to the end of the list as it will be
		 * discarded soon. The page is finally removed from the LRU
		 * list in osc_page_delete(). */
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);

		/* it's okay to grab a refcount here w/o holding the lock
		 * because the page deleter has to grab cl_lru_list_lock to
		 * delete the page. */
		cl_page_get(page);
		pvec[index++] = page;
		if (++count >= target)
			break;

		if (unlikely(index == OTI_PVEC_SIZE)) {
			client_obd_list_unlock(&cli->cl_lru_list_lock);
			count -= discard_pagevec(env, io, pvec, index);
			index = 0;

			client_obd_list_lock(&cli->cl_lru_list_lock);
		}
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (clobj) {
		count -= discard_pagevec(env, io, pvec, index);

		cl_io_fini(env, io);
		cl_object_put(env, clobj);
	}
	cl_env_nested_put(&nest, env);

	atomic_dec(&cli->cl_lru_shrinkers);
	return count > 0 ? count : rc;
}
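
/*
 * Put a page back on the client's LRU list once its IO has completed.
 * If other threads are waiting for LRU slots, shrink the list and wake
 * them up.
 */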
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
{
	bool wakeup = false;

	if (!opg->ops_in_lru)
		return;

	atomic_dec(&cli->cl_lru_busy);
	client_obd_list_lock(&cli->cl_lru_list_lock);
	if (list_empty(&opg->ops_lru)) {
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
		atomic_inc_return(&cli->cl_lru_in_list);
		wakeup = atomic_read(&osc_lru_waiters) > 0;
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (wakeup) {
		osc_lru_shrink(cli, osc_cache_too_much(cli));
		wake_up_all(&osc_lru_waitq);
	}
}

/* Delete a page from the LRU list. The page can leave the LRU list for two
 * reasons: it has been redirtied, or it has been deleted from the page
 * cache. */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
	if (opg->ops_in_lru) {
		client_obd_list_lock(&cli->cl_lru_list_lock);
		if (!list_empty(&opg->ops_lru)) {
			LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
			list_del_init(&opg->ops_lru);
			atomic_dec(&cli->cl_lru_in_list);
			if (!del)
				atomic_inc(&cli->cl_lru_busy);
		} else if (del) {
			LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
			atomic_dec(&cli->cl_lru_busy);
		}
		client_obd_list_unlock(&cli->cl_lru_list_lock);
		if (del) {
			atomic_inc(cli->cl_lru_left);
			/* this is a great place to release more LRU pages if
			 * this osc occupies too many LRU pages and the kernel
			 * is stealing one of them.
			 * cl_lru_shrinkers avoids a recursive call in case
			 * we're already in the context of osc_lru_shrink(). */
			if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
			    !memory_pressure_get())
				osc_lru_shrink(cli, osc_cache_too_much(cli));
			wake_up(&osc_lru_waitq);
		}
	} else {
		LASSERT(list_empty(&opg->ops_lru));
	}
}

static inline int max_to_shrink(struct client_obd *cli)
{
	return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
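
/*
 * Reclaim LRU slots, first from this client's own list and, if that frees
 * nothing, from the other clients sharing the same cl_client_cache. The
 * second step should rarely be needed.
 */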
static int osc_lru_reclaim(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int max_scans;
	int rc;

	LASSERT(cache);

	rc = osc_lru_shrink(cli, lru_shrink_min);
	if (rc != 0) {
		CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
		       cli->cl_import->imp_obd->obd_name, rc, cli);
		return rc;
	}

	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
	       cli->cl_import->imp_obd->obd_name, cli,
	       atomic_read(&cli->cl_lru_in_list),
	       atomic_read(&cli->cl_lru_busy));

	/* Reclaim LRU slots from other client_obds as this one cannot free
	 * enough from its own list. This should rarely happen. */
	spin_lock(&cache->ccc_lru_lock);
	LASSERT(!list_empty(&cache->ccc_lru));

	cache->ccc_lru_shrinkers++;
	list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);

	max_scans = atomic_read(&cache->ccc_users);
	while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
		cli = list_entry(cache->ccc_lru.next, struct client_obd,
				 cl_lru_osc);

		CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
		       cli->cl_import->imp_obd->obd_name, cli,
		       atomic_read(&cli->cl_lru_in_list),
		       atomic_read(&cli->cl_lru_busy));

		list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
		if (atomic_read(&cli->cl_lru_in_list) > 0) {
			spin_unlock(&cache->ccc_lru_lock);

			rc = osc_lru_shrink(cli, max_to_shrink(cli));
			spin_lock(&cache->ccc_lru_lock);
			if (rc != 0)
				break;
		}
	}
	spin_unlock(&cache->ccc_lru_lock);

	CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
	       cli->cl_import->imp_obd->obd_name, cli, rc);
	return rc;
}
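
/*
 * Reserve an LRU slot for a page entering the cache. May block in
 * l_wait_event() until a slot is freed, triggering reclaim from this and
 * other clients while it waits.
 */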
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	struct client_obd *cli = osc_cli(obj);
	int rc = 0;

	if (!cli->cl_cache) /* shall not be in LRU */
		return 0;

	LASSERT(atomic_read(cli->cl_lru_left) >= 0);
	while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
		int gen;

		/* run out of LRU slots, try to drop some by ourselves */
		rc = osc_lru_reclaim(cli);
		if (rc < 0)
			break;
		if (rc > 0)
			continue;

		cond_resched();

		/* slowest case, all of the cached pages are busy; notify
		 * the other OSCs that we lack LRU slots. */
		atomic_inc(&osc_lru_waiters);

		gen = atomic_read(&cli->cl_lru_in_list);
		rc = l_wait_event(osc_lru_waitq,
				  atomic_read(cli->cl_lru_left) > 0 ||
				  (atomic_read(&cli->cl_lru_in_list) > 0 &&
				   gen != atomic_read(&cli->cl_lru_in_list)),
				  &lwi);

		atomic_dec(&osc_lru_waiters);
		if (rc < 0)
			break;
	}

	if (rc >= 0) {
		atomic_inc(&cli->cl_lru_busy);
		opg->ops_in_lru = 1;
		rc = 0;
	}

	return rc;
}

/** @} osc */