drivers/staging/lustre/lustre/osc/osc_page.c

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for OSC layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"

static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg);

/** \addtogroup osc
 * @{
 */

/*
 * Comment out osc_page_protected because it may sleep inside the
 * client_obd_list_lock.
 * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
 *   -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
 *   -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
 */
#if 0
static int osc_page_is_dlocked(const struct lu_env *env,
			       const struct osc_page *opg,
			       enum cl_lock_mode mode, int pending, int unref)
{
	struct cl_page *page;
	struct osc_object *obj;
	struct osc_thread_info *info;
	struct ldlm_res_id *resname;
	struct lustre_handle *lockh;
	ldlm_policy_data_t *policy;
	ldlm_mode_t dlmmode;
	__u64 flags;

	might_sleep();

	info = osc_env_info(env);
	resname = &info->oti_resname;
	policy = &info->oti_policy;
	lockh = &info->oti_handle;
	page = opg->ops_cl.cpl_page;
	obj = cl2osc(opg->ops_cl.cpl_obj);

	flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
	if (pending)
		flags |= LDLM_FL_CBPENDING;

	dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
	osc_lock_build_res(env, obj, resname);
	osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
	return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
			      dlmmode, &flags, NULL, lockh, unref);
}

/**
 * Checks an invariant that a page in the cache is covered by a lock, as
 * needed.
 */
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	struct cl_object_header *hdr;
	struct cl_lock *scan;
	struct cl_page *page;
	struct cl_lock_descr *descr;
	int result;

	LINVRNT(!opg->ops_temp);

	page = opg->ops_cl.cpl_page;
	if (page->cp_owner != NULL &&
	    cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
		/*
		 * If IO is done without locks (liblustre, or lloop), lock is
		 * not required.
		 */
		result = 1;
	else
		/* otherwise check for a DLM lock */
		result = osc_page_is_dlocked(env, opg, mode, 1, unref);
	if (result == 0) {
		/* maybe this page is a part of a lockless io? */
		hdr = cl_object_header(opg->ops_cl.cpl_obj);
		descr = &osc_env_info(env)->oti_descr;
		descr->cld_mode = mode;
		descr->cld_start = page->cp_index;
		descr->cld_end = page->cp_index;
		spin_lock(&hdr->coh_lock_guard);
		list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
			/*
			 * Lock-less sub-lock has to be either in HELD state
			 * (when io is actively going on), or in CACHED state,
			 * when top-lock is being unlocked:
			 * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
			 */
			if ((scan->cll_state == CLS_HELD ||
			     scan->cll_state == CLS_CACHED) &&
			    cl_lock_ext_match(&scan->cll_descr, descr)) {
				struct osc_lock *olck;

				olck = osc_lock_at(scan);
				result = osc_lock_is_lockless(olck);
				break;
			}
		}
		spin_unlock(&hdr->coh_lock_guard);
	}
	return result;
}
#else
static int osc_page_protected(const struct lu_env *env,
			      const struct osc_page *opg,
			      enum cl_lock_mode mode, int unref)
{
	return 1;
}
#endif

/*****************************************************************************
 *
 * Page operations.
 *
 */
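
/**
 * Finalizes the OSC slice of a page. By this point the slice must no
 * longer hold a lock reference, i.e. ops_lock must be NULL.
 */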
static void osc_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);

	CDEBUG(D_TRACE, "%p\n", opg);
	LASSERT(opg->ops_lock == NULL);
}
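
/**
 * Pins a page for transfer: takes a cl_page reference tagged with \a label
 * and marks the slice as transfer-pinned.
 */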
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	LASSERT(!opg->ops_transfer_pinned);
	cl_page_get(page);
	lu_ref_add_atomic(&page->cp_reference, label, page);
	opg->ops_transfer_pinned = 1;
}
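
/**
 * Undoes osc_page_transfer_get(): drops the tagged transfer reference, if
 * the page is still pinned.
 */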
static void osc_page_transfer_put(const struct lu_env *env,
				  struct osc_page *opg)
{
	struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);

	if (opg->ops_transfer_pinned) {
		lu_ref_del(&page->cp_reference, "transfer", page);
		opg->ops_transfer_pinned = 0;
		cl_page_put(env, page);
	}
}

/**
 * This is called once for every page when it is submitted for a transfer
 * either opportunistic (osc_page_cache_add()), or immediate
 * (osc_page_submit()).
 */
static void osc_page_transfer_add(const struct lu_env *env,
				  struct osc_page *opg, enum cl_req_type crt)
{
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	/* ops_lru and ops_inflight share the same field, so take it from LRU
	 * first and then use it as inflight. */
	osc_lru_del(osc_cli(obj), opg, false);

	spin_lock(&obj->oo_seatbelt);
	list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
	opg->ops_submitter = current;
	spin_unlock(&obj->oo_seatbelt);
}
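
/**
 * Queues a page for an opportunistic (write-back) transfer through
 * osc_queue_async_io(), keeping it transfer-pinned while it is queued.
 */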
static int osc_page_cache_add(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *io)
{
	struct osc_io *oio = osc_env_io(env);
	struct osc_page *opg = cl2osc_page(slice);
	int result;

	LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));

	osc_page_transfer_get(opg, "transfer\0cache");
	result = osc_queue_async_io(env, io, opg);
	if (result != 0)
		osc_page_transfer_put(env, opg);
	else
		osc_page_transfer_add(env, opg, CRT_WRITE);

	/* for sync write, the kernel will wait for this page to be flushed
	 * before osc_io_end() is called, so release it earlier.
	 * for mkwrite(), it is known that there are no further pages. */
	if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
		if (oio->oi_active != NULL) {
			osc_extent_release(env, oio->oi_active);
			oio->oi_active = NULL;
		}
	}

	return result;
}
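
/**
 * Fills in an LDLM extent policy spanning the page range [start, end] of
 * \a obj, converted from page indices to byte offsets.
 */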
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
		      pgoff_t start, pgoff_t end)
{
	memset(policy, 0, sizeof(*policy));
	policy->l_extent.start = cl_offset(obj, start);
	policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
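
/**
 * Takes a pageref on \a lock and caches the lock in opg->ops_lock. Fails
 * with -ENODATA when the pageref count is non-positive, which presumably
 * means the lock is on its way out.
 */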
static int osc_page_addref_lock(const struct lu_env *env,
				struct osc_page *opg,
				struct cl_lock *lock)
{
	struct osc_lock *olock;
	int rc;

	LASSERT(opg->ops_lock == NULL);

	olock = osc_lock_at(lock);
	if (atomic_inc_return(&olock->ols_pageref) <= 0) {
		atomic_dec(&olock->ols_pageref);
		rc = -ENODATA;
	} else {
		cl_lock_get(lock);
		opg->ops_lock = lock;
		rc = 0;
	}
	return rc;
}
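
/**
 * Drops the pageref taken by osc_page_addref_lock() and clears
 * opg->ops_lock.
 */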
static void osc_page_putref_lock(const struct lu_env *env,
				 struct osc_page *opg)
{
	struct cl_lock *lock = opg->ops_lock;
	struct osc_lock *olock;

	LASSERT(lock != NULL);
	olock = osc_lock_at(lock);

	atomic_dec(&olock->ols_pageref);
	opg->ops_lock = NULL;

	cl_lock_put(env, lock);
}
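
/**
 * Reports whether the page is covered by a cl_lock: returns -EBUSY when a
 * matching lock is found and successfully referenced, -ENODATA otherwise.
 */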
static int osc_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused)
{
	struct cl_lock *lock;
	int result = -ENODATA;

	lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
			       NULL, 1, 0);
	if (lock != NULL) {
		if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
			result = -EBUSY;
		cl_lock_put(env, lock);
	}
	return result;
}

static void osc_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);

	if (unlikely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
}
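
/**
 * Read completion handler: drops the lock reference held for the read, if
 * any, and puts the page back on the LRU.
 */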
static void osc_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);

	if (likely(opg->ops_lock))
		osc_page_putref_lock(env, opg);
	osc_lru_add(osc_cli(obj), opg);
}

static void osc_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(slice->cpl_obj);

	osc_lru_add(osc_cli(obj), opg);
}
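
/**
 * Placeholder for the read-side cpo_cache_add slot, which is apparently
 * never expected to be called, hence the LBUG().
 */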
static int osc_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice,
			 struct cl_io *unused)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

static const char *osc_list(struct list_head *head)
{
	return list_empty(head) ? "-" : "+";
}
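
/**
 * Returns how long the page has been under transfer, in cfs_time ticks,
 * or 0 if it was never submitted.
 */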
static inline unsigned long osc_submit_duration(struct osc_page *opg)
{
	if (opg->ops_submit_time == 0)
		return 0;

	return (cfs_time_current() - opg->ops_submit_time);
}

static int osc_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object *obj = cl2osc(slice->cpl_obj);
	struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;

	return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
			  "1< %#x %d %u %s %s > "
			  "2< %llu %u %u %#x %#x | %p %p %p > "
			  "3< %s %p %d %lu %d > "
			  "4< %d %d %d %lu %s | %s %s %s %s > "
			  "5< %s %s %s %s | %d %s | %d %s %s>\n",
			  opg,
			  /* 1 */
			  oap->oap_magic, oap->oap_cmd,
			  oap->oap_interrupted,
			  osc_list(&oap->oap_pending_item),
			  osc_list(&oap->oap_rpc_item),
			  /* 2 */
			  oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
			  oap->oap_async_flags, oap->oap_brw_flags,
			  oap->oap_request, oap->oap_cli, obj,
			  /* 3 */
			  osc_list(&opg->ops_inflight),
			  opg->ops_submitter, opg->ops_transfer_pinned,
			  osc_submit_duration(opg), opg->ops_srvlock,
			  /* 4 */
			  cli->cl_r_in_flight, cli->cl_w_in_flight,
			  cli->cl_max_rpcs_in_flight,
			  cli->cl_avail_grant,
			  osc_list(&cli->cl_cache_waiters),
			  osc_list(&cli->cl_loi_ready_list),
			  osc_list(&cli->cl_loi_hp_ready_list),
			  osc_list(&cli->cl_loi_write_list),
			  osc_list(&cli->cl_loi_read_list),
			  /* 5 */
			  osc_list(&obj->oo_ready_item),
			  osc_list(&obj->oo_hp_ready_item),
			  osc_list(&obj->oo_write_item),
			  osc_list(&obj->oo_read_item),
			  atomic_read(&obj->oo_nr_reads),
			  osc_list(&obj->oo_reading_exts),
			  atomic_read(&obj->oo_nr_writes),
			  osc_list(&obj->oo_hp_exts),
			  osc_list(&obj->oo_urgent_exts));
}
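
/**
 * Called when the page is removed from the page cache: tears down the
 * async page, unlinks it from the in-flight list and drops it from the LRU.
 */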
static void osc_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	int rc;

	LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));

	CDEBUG(D_TRACE, "%p\n", opg);
	osc_page_transfer_put(env, opg);
	rc = osc_teardown_async_page(env, obj, opg);
	if (rc) {
		CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
			      "Trying to teardown failed: %d\n", rc);
		LASSERT(0);
	}

	spin_lock(&obj->oo_seatbelt);
	if (opg->ops_submitter != NULL) {
		LASSERT(!list_empty(&opg->ops_inflight));
		list_del_init(&opg->ops_inflight);
		opg->ops_submitter = NULL;
	}
	spin_unlock(&obj->oo_seatbelt);

	osc_lru_del(osc_cli(obj), opg, true);
}
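
/**
 * Sets the valid byte range [from, to) of the page and marks the async
 * page count as stable.
 */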
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
		   int from, int to)
{
	struct osc_page *opg = cl2osc_page(slice);
	struct osc_async_page *oap = &opg->ops_oap;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	opg->ops_from = from;
	opg->ops_to = to;
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags |= ASYNC_COUNT_STABLE;
	spin_unlock(&oap->oap_lock);
}

static int osc_page_cancel(const struct lu_env *env,
			   const struct cl_page_slice *slice)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc = 0;

	LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));

	/* Check if the transfer against this page has completed, or was
	 * never queued in the first place. */
	if (opg->ops_transfer_pinned)
		/* FIXME: may not be interrupted.. */
		rc = osc_cancel_async_page(env, opg);
	LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
	return rc;
}

static int osc_page_flush(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *io)
{
	struct osc_page *opg = cl2osc_page(slice);
	int rc = 0;

	rc = osc_flush_async_page(env, io, opg);
	return rc;
}

static const struct cl_page_operations osc_page_ops = {
	.cpo_fini          = osc_page_fini,
	.cpo_print         = osc_page_print,
	.cpo_delete        = osc_page_delete,
	.cpo_is_under_lock = osc_page_is_under_lock,
	.cpo_disown        = osc_page_disown,
	.io = {
		[CRT_READ] = {
			.cpo_cache_add  = osc_page_fail,
			.cpo_completion = osc_page_completion_read
		},
		[CRT_WRITE] = {
			.cpo_cache_add  = osc_page_cache_add,
			.cpo_completion = osc_page_completion_write
		}
	},
	.cpo_clip   = osc_page_clip,
	.cpo_cancel = osc_page_cancel,
	.cpo_flush  = osc_page_flush
};
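
/**
 * Initializes the OSC slice of \a page: prepares the async page, attaches
 * the slice to the cl_page, and reserves an LRU slot for cacheable pages.
 */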
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, struct page *vmpage)
{
	struct osc_object *osc = cl2osc(obj);
	struct osc_page *opg = cl_object_page_slice(obj, page);
	int result;

	opg->ops_from = 0;
	opg->ops_to = PAGE_CACHE_SIZE;

	result = osc_prep_async_page(osc, opg, vmpage,
				     cl_offset(obj, page->cp_index));
	if (result == 0) {
		struct osc_io *oio = osc_env_io(env);

		opg->ops_srvlock = osc_io_srvlock(oio);
		cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
	}
	/*
	 * Cannot assert osc_page_protected() here as read-ahead
	 * creates temporary pages outside of a lock.
	 */
	/* ops_inflight and ops_lru are the same field, but it doesn't
	 * hurt to initialize it twice :-) */
	INIT_LIST_HEAD(&opg->ops_inflight);
	INIT_LIST_HEAD(&opg->ops_lru);

	/* reserve an LRU space for this page */
	if (page->cp_type == CPT_CACHEABLE && result == 0)
		result = osc_lru_reserve(env, osc, opg);

	return result;
}

/**
 * Helper function called by osc_io_submit() for every page in an immediate
 * transfer (i.e., transferred synchronously).
 */
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags)
{
	struct osc_async_page *oap = &opg->ops_oap;
	struct osc_object *obj = oap->oap_obj;

	LINVRNT(osc_page_protected(env, opg,
				   crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));

	LASSERTF(oap->oap_magic == OAP_MAGIC,
		 "Bad oap magic: oap %p, magic 0x%x\n", oap, oap->oap_magic);
	LASSERT(oap->oap_async_flags & ASYNC_READY);
	LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);

	oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
	oap->oap_page_off = opg->ops_from;
	oap->oap_count = opg->ops_to - opg->ops_from;
	oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;

	if (!client_is_remote(osc_export(obj)) &&
	    capable(CFS_CAP_SYS_RESOURCE)) {
		oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
		oap->oap_cmd |= OBD_BRW_NOQUOTA;
	}

	opg->ops_submit_time = cfs_time_current();
	osc_page_transfer_get(opg, "transfer\0imm");
	osc_page_transfer_add(env, opg, crt);
}

/* --------------- LRU page management ------------------ */

/* OSC is a natural place to manage LRU pages as applications tend to write
 * to storage one OSC at a time. Ideally, an OSC that is used more frequently
 * should occupy more LRU slots. On the other hand, we should avoid using up
 * all LRU slots (client_obd::cl_lru_left), since a process would then have
 * to sleep waiting for a free slot, which would be very bad. The algorithm
 * therefore requires each OSC to voluntarily free slots so that a reasonable
 * number of free slots is maintained at all times.
 */

static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
 * number of pages to avoid running out of LRU budget. */
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
/* free at most this number, otherwise shrinking would take too long. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */

/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
 * we should free slots aggressively. In this way, slots are freed at a
 * steady pace to maintain fairness among OSCs.
 *
 * Return how many LRU pages should be freed. */
static int osc_cache_too_much(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int pages = atomic_read(&cli->cl_lru_in_list) >> 1;

	if (atomic_read(&osc_lru_waiters) > 0 &&
	    atomic_read(cli->cl_lru_left) < lru_shrink_max)
		/* drop lru pages aggressively */
		return min(pages, lru_shrink_max);

	/* if it's about to run out of LRU slots, we should free some, but
	 * not too many, to maintain fairness among OSCs. */
	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
		unsigned long tmp;

		tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
		if (pages > tmp)
			return min(pages, lru_shrink_max);

		return pages > lru_shrink_min ? lru_shrink_min : 0;
	}

	return 0;
}

/* Return how many pages in @pvec were not discarded. */
static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
			   struct cl_page **pvec, int max_index)
{
	int count;
	int i;

	for (count = 0, i = 0; i < max_index; i++) {
		struct cl_page *page = pvec[i];

		if (cl_page_own_try(env, io, page) == 0) {
			/* free LRU page only if nobody is using it.
			 * This check is necessary to avoid freeing pages
			 * that have already been removed from LRU and
			 * pinned for IO. */
			if (!cl_page_in_use(page)) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				++count;
			}
			cl_page_disown(env, io, page);
		}
		cl_page_put(env, page);
		pvec[i] = NULL;
	}
	return max_index - count;
}

/**
 * Drop at most @target pages from the LRU.
 */
int osc_lru_shrink(struct client_obd *cli, int target)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_object *clobj = NULL;
	struct cl_page **pvec;
	struct osc_page *opg;
	int maxscan = 0;
	int count = 0;
	int index = 0;
	int rc = 0;

	LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
	if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
		return 0;

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env))
		return PTR_ERR(env);

	pvec = osc_env_info(env)->oti_pvec;
	io = &osc_env_info(env)->oti_io;

	client_obd_list_lock(&cli->cl_lru_list_lock);
	atomic_inc(&cli->cl_lru_shrinkers);
	maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
	while (!list_empty(&cli->cl_lru_list)) {
		struct cl_page *page;

		if (--maxscan < 0)
			break;

		opg = list_entry(cli->cl_lru_list.next, struct osc_page,
				 ops_lru);
		page = cl_page_top(opg->ops_cl.cpl_page);
		if (cl_page_in_use_noref(page)) {
			list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
			continue;
		}

		LASSERT(page->cp_obj != NULL);
		if (clobj != page->cp_obj) {
			struct cl_object *tmp = page->cp_obj;

			cl_object_get(tmp);
			client_obd_list_unlock(&cli->cl_lru_list_lock);

			if (clobj != NULL) {
				count -= discard_pagevec(env, io, pvec, index);
				index = 0;

				cl_io_fini(env, io);
				cl_object_put(env, clobj);
				clobj = NULL;
			}

			clobj = tmp;
			io->ci_obj = clobj;
			io->ci_ignore_layout = 1;
			rc = cl_io_init(env, io, CIT_MISC, clobj);

			client_obd_list_lock(&cli->cl_lru_list_lock);

			if (rc != 0)
				break;

			++maxscan;
			continue;
		}

		/* move this page to the end of the list as it will be
		 * discarded soon. The page will be finally removed from
		 * the LRU list in osc_page_delete(). */
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);

		/* it's okay to grab a refcount here w/o holding the lock
		 * because whoever deletes the page has to grab
		 * cl_lru_list_lock first. */
		cl_page_get(page);
		pvec[index++] = page;
		if (++count >= target)
			break;

		if (unlikely(index == OTI_PVEC_SIZE)) {
			client_obd_list_unlock(&cli->cl_lru_list_lock);
			count -= discard_pagevec(env, io, pvec, index);
			index = 0;

			client_obd_list_lock(&cli->cl_lru_list_lock);
		}
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (clobj != NULL) {
		count -= discard_pagevec(env, io, pvec, index);

		cl_io_fini(env, io);
		cl_object_put(env, clobj);
	}
	cl_env_nested_put(&nest, env);

	atomic_dec(&cli->cl_lru_shrinkers);
	return count > 0 ? count : rc;
}
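
/* Add a page back to the LRU list, typically on transfer completion, and
 * kick the shrinker/wake waiters when other OSCs are waiting for slots. */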
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
{
	bool wakeup = false;

	if (!opg->ops_in_lru)
		return;

	atomic_dec(&cli->cl_lru_busy);
	client_obd_list_lock(&cli->cl_lru_list_lock);
	if (list_empty(&opg->ops_lru)) {
		list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
		atomic_inc_return(&cli->cl_lru_in_list);
		wakeup = atomic_read(&osc_lru_waiters) > 0;
	}
	client_obd_list_unlock(&cli->cl_lru_list_lock);

	if (wakeup) {
		osc_lru_shrink(cli, osc_cache_too_much(cli));
		wake_up_all(&osc_lru_waitq);
	}
}

/* delete page from the LRU list. The page can be deleted from the LRU list
 * for two reasons: it has been redirtied, or it has been removed from the
 * page cache. */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
	if (opg->ops_in_lru) {
		client_obd_list_lock(&cli->cl_lru_list_lock);
		if (!list_empty(&opg->ops_lru)) {
			LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
			list_del_init(&opg->ops_lru);
			atomic_dec(&cli->cl_lru_in_list);
			if (!del)
				atomic_inc(&cli->cl_lru_busy);
		} else if (del) {
			LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
			atomic_dec(&cli->cl_lru_busy);
		}
		client_obd_list_unlock(&cli->cl_lru_list_lock);
		if (del) {
			atomic_inc(cli->cl_lru_left);
			/* this is a great place to release more LRU pages if
			 * this osc occupies too many LRU pages and the kernel
			 * is stealing one of them.
			 * cl_lru_shrinkers is to avoid recursive calls in
			 * case we're already in the context of
			 * osc_lru_shrink(). */
			if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
			    !memory_pressure_get())
				osc_lru_shrink(cli, osc_cache_too_much(cli));
			wake_up(&osc_lru_waitq);
		}
	} else {
		LASSERT(list_empty(&opg->ops_lru));
	}
}

static inline int max_to_shrink(struct client_obd *cli)
{
	return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
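
/* Reclaim LRU slots, first from this client_obd's own list and, failing
 * that, by shrinking the LRU lists of other clients sharing the same cache.
 * Returns the number of pages freed (possibly 0) or a negative error. */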
static int osc_lru_reclaim(struct client_obd *cli)
{
	struct cl_client_cache *cache = cli->cl_cache;
	int max_scans;
	int rc;

	LASSERT(cache != NULL);
	LASSERT(!list_empty(&cache->ccc_lru));

	rc = osc_lru_shrink(cli, lru_shrink_min);
	if (rc != 0) {
		CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
		       cli->cl_import->imp_obd->obd_name, rc, cli);
		return rc;
	}

	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
	       cli->cl_import->imp_obd->obd_name, cli,
	       atomic_read(&cli->cl_lru_in_list),
	       atomic_read(&cli->cl_lru_busy));

	/* Reclaim LRU slots from other client_obd as it can't free enough
	 * from its own. This should rarely happen. */
	spin_lock(&cache->ccc_lru_lock);
	cache->ccc_lru_shrinkers++;
	list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);

	max_scans = atomic_read(&cache->ccc_users);
	while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
		cli = list_entry(cache->ccc_lru.next, struct client_obd,
				 cl_lru_osc);

		CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
		       cli->cl_import->imp_obd->obd_name, cli,
		       atomic_read(&cli->cl_lru_in_list),
		       atomic_read(&cli->cl_lru_busy));

		list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
		if (atomic_read(&cli->cl_lru_in_list) > 0) {
			spin_unlock(&cache->ccc_lru_lock);

			rc = osc_lru_shrink(cli, max_to_shrink(cli));
			spin_lock(&cache->ccc_lru_lock);
			if (rc != 0)
				break;
		}
	}
	spin_unlock(&cache->ccc_lru_lock);

	CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
	       cli->cl_import->imp_obd->obd_name, cli, rc);
	return rc;
}
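
/* Reserve an LRU slot for \a opg, reclaiming or waiting for slots when none
 * are free. Returns 0 on success, with cl_lru_busy incremented, or a
 * negative error if the wait is interrupted. */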
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
			   struct osc_page *opg)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	struct client_obd *cli = osc_cli(obj);
	int rc = 0;

	if (cli->cl_cache == NULL) /* shall not be in LRU */
		return 0;

	LASSERT(atomic_read(cli->cl_lru_left) >= 0);
	while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
		int gen;

		/* run out of LRU spaces, try to drop some by itself */
		rc = osc_lru_reclaim(cli);
		if (rc < 0)
			break;
		if (rc > 0)
			continue;

		cond_resched();

		/* slowest case: all of the cached pages are busy, so notify
		 * other OSCs that we are short of LRU slots. */
		atomic_inc(&osc_lru_waiters);

		gen = atomic_read(&cli->cl_lru_in_list);
		rc = l_wait_event(osc_lru_waitq,
				  atomic_read(cli->cl_lru_left) > 0 ||
				  (atomic_read(&cli->cl_lru_in_list) > 0 &&
				   gen != atomic_read(&cli->cl_lru_in_list)),
				  &lwi);

		atomic_dec(&osc_lru_waiters);
		if (rc < 0)
			break;
	}

	if (rc >= 0) {
		atomic_inc(&cli->cl_lru_busy);
		opg->ops_in_lru = 1;
		rc = 0;
	}

	return rc;
}

/** @} osc */