// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#include "dm.h"

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers rises
 *	past the threshold derived from DM_BUFIO_WRITEBACK_RATIO.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*--------------------------------------------------------------*/

/*
 * Rather than use an LRU list, we use a clock algorithm where entries
 * are held in a circular list. When an entry is 'hit' a reference bit
 * is set. The least recently used entry is approximated by running a
 * cursor around the list selecting unreferenced entries. Referenced
 * entries have their reference bit cleared as the cursor passes them.
 */
struct lru_entry {
	struct list_head list;
	atomic_t referenced;
};

struct lru_iter {
	struct lru *lru;
	struct list_head list;
	struct lru_entry *stop;
	struct lru_entry *e;
};

struct lru {
	struct list_head *cursor;
	unsigned long count;

	struct list_head iterators;
};

/*--------------*/

static void lru_init(struct lru *lru)
{
	lru->cursor = NULL;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->iterators);
}

static void lru_destroy(struct lru *lru)
{
	WARN_ON_ONCE(lru->cursor);
	WARN_ON_ONCE(!list_empty(&lru->iterators));
}

/*
 * Insert a new entry into the lru.
 */
static void lru_insert(struct lru *lru, struct lru_entry *le)
{
	/*
	 * Don't be tempted to set to 1, makes the lru aspect
	 * perform poorly.
	 */
	atomic_set(&le->referenced, 0);

	if (lru->cursor) {
		list_add_tail(&le->list, lru->cursor);
	} else {
		INIT_LIST_HEAD(&le->list);
		lru->cursor = &le->list;
	}
	lru->count++;
}

/*--------------*/

/*
 * Convert a list_head pointer to an lru_entry pointer.
 */
static inline struct lru_entry *to_le(struct list_head *l)
{
	return container_of(l, struct lru_entry, list);
}

/*
 * Initialize an lru_iter and add it to the list of cursors in the lru.
 */
static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
{
	it->lru = lru;
	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
	list_add(&it->list, &lru->iterators);
}

/*
 * Remove an lru_iter from the list of cursors in the lru.
 */
static inline void lru_iter_end(struct lru_iter *it)
{
	list_del(&it->list);
}

/* Predicate function type to be used with lru_iter_next */
typedef bool (*iter_predicate)(struct lru_entry *le, void *context);

/*
 * Advance the cursor to the next entry that passes the
 * predicate, and return that entry. Returns NULL if the
 * iteration is complete.
 */
static struct lru_entry *lru_iter_next(struct lru_iter *it,
				       iter_predicate pred, void *context)
{
	struct lru_entry *e;

	while (it->e) {
		e = it->e;

		/* advance the cursor */
		if (it->e == it->stop)
			it->e = NULL;
		else
			it->e = to_le(it->e->list.next);

		if (pred(e, context))
			return e;
	}

	return NULL;
}
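
/*
 * Illustrative sketch of the intended calling pattern (my_predicate is
 * a placeholder, not a function in this file):
 *
 *	struct lru_iter it;
 *	struct lru_entry *le;
 *
 *	lru_iter_begin(lru, &it);
 *	while ((le = lru_iter_next(&it, my_predicate, context)))
 *		... examine le ...
 *	lru_iter_end(&it);
 *
 * Registering the iterator on lru->iterators is what allows
 * lru_iter_invalidate() below to keep 'it' valid if entries are
 * removed mid-iteration.
 */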

/*
 * Invalidate a specific lru_entry and update all cursors in
 * the lru accordingly.
 */
static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
{
	struct lru_iter *it;

	list_for_each_entry(it, &lru->iterators, list) {
		/* Move it->e forwards if necessary. */
		if (it->e == e) {
			it->e = to_le(it->e->list.next);
			if (it->e == e)
				it->e = NULL;
		}

		/* Move it->stop backwards if necessary. */
		if (it->stop == e) {
			it->stop = to_le(it->stop->list.prev);
			if (it->stop == e)
				it->stop = NULL;
		}
	}
}

/*--------------*/

/*
 * Remove a specific entry from the lru.
 */
static void lru_remove(struct lru *lru, struct lru_entry *le)
{
	lru_iter_invalidate(lru, le);
	if (lru->count == 1) {
		lru->cursor = NULL;
	} else {
		if (lru->cursor == &le->list)
			lru->cursor = lru->cursor->next;
		list_del(&le->list);
	}
	lru->count--;
}

/*
 * Mark as referenced.
 */
static inline void lru_reference(struct lru_entry *le)
{
	atomic_set(&le->referenced, 1);
}

/*--------------*/

/*
 * Remove the least recently used entry (approx), that passes the predicate.
 * Returns NULL on failure.
 */
enum evict_result {
	ER_EVICT,
	ER_DONT_EVICT,
	ER_STOP, /* stop looking for something to evict */
};

typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);

static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
	unsigned long tested = 0;
	struct list_head *h = lru->cursor;
	struct lru_entry *le;

	if (!h)
		return NULL;
	/*
	 * In the worst case we have to loop around twice. Once to clear
	 * the reference flags, and then again to discover the predicate
	 * fails for all entries.
	 */
	while (tested < lru->count) {
		le = container_of(h, struct lru_entry, list);

		if (atomic_read(&le->referenced)) {
			atomic_set(&le->referenced, 0);
		} else {
			tested++;
			switch (pred(le, context)) {
			case ER_EVICT:
				/*
				 * Adjust the cursor, so we start the next
				 * search from here.
				 */
				lru->cursor = le->list.next;
				lru_remove(lru, le);
				return le;

			case ER_DONT_EVICT:
				break;

			case ER_STOP:
				lru->cursor = le->list.next;
				return NULL;
			}
		}

		h = h->next;

		if (!no_sleep)
			cond_resched();
	}

	return NULL;
}
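
/*
 * A minimal predicate, for illustration only (this exact helper does
 * not exist in the file): evict the first unreferenced entry
 * unconditionally.
 *
 *	static enum evict_result evict_any(struct lru_entry *le, void *context)
 *	{
 *		return ER_EVICT;
 *	}
 *
 *	le = lru_evict(lru, evict_any, NULL, false);
 *
 * The real predicates below (wrapped by __evict_pred()) also check the
 * buffer's hold count and state bits.
 */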

/*--------------------------------------------------------------*/

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	/* protected by the locks in dm_buffer_cache */
	struct rb_node node;

	/* immutable, so don't need protecting */
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */

	/*
	 * These two fields are used in isolation, so do not need
	 * a surrounding lock.
	 */
	atomic_t hold_count;
	unsigned long last_accessed;

	/*
	 * Everything else is protected by the mutex in
	 * dm_bufio_client
	 */
	unsigned long state;
	struct lru_entry lru;
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
	struct list_head write_list;
	struct dm_bufio_client *c;
	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*--------------------------------------------------------------*/

/*
 * The buffer cache manages buffers, particularly:
 *  - inc/dec of holder count
 *  - setting the last_accessed field
 *  - maintains clean/dirty state along with lru
 *  - selecting buffers that match predicates
 *
 * It does *not* handle:
 *  - allocation/freeing of buffers.
 *  - IO
 *  - Eviction or cache sizing.
 *
 * cache_get() and cache_put() are threadsafe, you do not need to
 * protect these calls with a surrounding mutex. All the other
 * methods are not threadsafe; they do use locking primitives, but
 * only enough to ensure get/put are threadsafe.
 */

struct buffer_tree {
	union {
		struct rw_semaphore lock;
		rwlock_t spinlock;
	} u;
	struct rb_root root;
} ____cacheline_aligned_in_smp;
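
/*
 * Only one member of the union above is ever used by a given cache
 * instance: cache_init() below initializes the rwlock when no_sleep is
 * set (so the tree locks may be taken from atomic context) and the
 * rw_semaphore otherwise.
 */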

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];
	/*
	 * We spread entries across multiple trees to reduce contention
	 * on the locks.
	 */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return dm_hash_locks_index(block, num_locks);
}

static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

/*
 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 * This struct helps avoid redundant drop and gets of the same lock.
 */
struct lock_history {
	struct dm_buffer_cache *cache;
	bool write;
	unsigned int previous;
	unsigned int no_previous;
};

static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
{
	lh->cache = cache;
	lh->write = write;
	lh->no_previous = cache->num_locks;
	lh->previous = lh->no_previous;
}

static void __lh_lock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_read(&lh->cache->trees[index].u.lock);
	}
}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_read(&lh->cache->trees[index].u.lock);
	}
}

/*
 * Make sure you call this since it will unlock the final lock.
 */
static void lh_exit(struct lock_history *lh)
{
	if (lh->previous != lh->no_previous) {
		__lh_unlock(lh, lh->previous);
		lh->previous = lh->no_previous;
	}
}

/*
 * Named 'next' because there is no corresponding
 * 'up/unlock' call since it's done automatically.
 */
static void lh_next(struct lock_history *lh, sector_t b)
{
	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */

	if (lh->previous != lh->no_previous) {
		if (lh->previous != index) {
			__lh_unlock(lh, lh->previous);
			__lh_lock(lh, index);
			lh->previous = index;
		}
	} else {
		__lh_lock(lh, index);
		lh->previous = index;
	}
}
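
/*
 * The calling pattern (see cache_evict() and cache_mark_many() below):
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, bc, true);
 *	for each block visited:
 *		lh_next(&lh, block);	// takes or switches the tree lock
 *	lh_exit(&lh);			// drops the final lock
 *
 * Consecutive blocks that hash to the same tree reuse the held lock
 * instead of dropping and retaking it.
 */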

static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
{
	return container_of(le, struct dm_buffer, lru);
}

static struct dm_buffer *list_to_buffer(struct list_head *l)
{
	struct lru_entry *le = list_entry(l, struct lru_entry, list);

	if (!le)
		return NULL;

	return le_to_buffer(le);
}

static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}

/*--------------*/

/*
 * not threadsafe, or racey depending how you look at it
 */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}

/*--------------*/

/*
 * Gets a specific buffer, indexed by block.
 * If the buffer is found then its holder count will be incremented and
 * lru_reference will be called.
 *
 * threadsafe
 */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __cache_inc_buffer(struct dm_buffer *b)
{
	atomic_inc(&b->hold_count);
	WRITE_ONCE(b->last_accessed, jiffies);
}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		lru_reference(&b->lru);
		__cache_inc_buffer(b);
	}
	cache_read_unlock(bc, block);

	return b;
}

/*--------------*/

/*
 * Returns true if the hold count hits zero.
 * threadsafe
 */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	BUG_ON(!atomic_read(&b->hold_count));
	r = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(bc, b->block);

	return r;
}
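
/*
 * Every successful cache_get() must be balanced by a cache_put().
 * A sketch of the pairing (illustrative only):
 *
 *	b = cache_get(bc, block);
 *	if (b) {
 *		... use b->data ...
 *		if (cache_put(bc, b))
 *			... the hold count just dropped to zero ...
 *	}
 */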

/*--------------*/

typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);

/*
 * Evicts a buffer based on a predicate. The oldest buffer that
 * matches the predicate will be selected. In addition to the
 * predicate the hold_count of the selected buffer will be zero.
 */
struct evict_wrapper {
	struct lock_history *lh;
	b_predicate pred;
	void *context;
};

/*
 * Wraps the buffer predicate turning it into an lru predicate. Adds
 * extra test for hold_count.
 */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{
	struct evict_wrapper *w = context;
	struct dm_buffer *b = le_to_buffer(le);

	lh_next(w->lh, b->block);

	if (atomic_read(&b->hold_count))
		return ER_DONT_EVICT;

	return w->pred(b, w->context);
}

static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred will have locked the appropriate tree. */
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct dm_buffer *b;
	struct lock_history lh;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}

/*--------------*/

/*
 * Mark a buffer as clean or dirty. Not threadsafe.
 */
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	if (list_mode != b->list_mode) {
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);
}

/*--------------*/

/*
 * Runs through the lru associated with 'old_mode'; each buffer that the
 * predicate matches is moved to 'new_mode'. Not threadsafe.
 */
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Iterates through all clean or dirty entries calling a function for each
 * entry. The callback may terminate the iteration early. Not threadsafe.
 */

/*
 * Iterator functions should return one of these actions to indicate
 * how the iteration should proceed.
 */
enum it_action {
	IT_NEXT,
	IT_COMPLETE,
};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);

		switch (fn(b, context)) {
		case IT_NEXT:
			break;

		case IT_COMPLETE:
			return;
		}
		cond_resched();

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}
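
/*
 * An illustrative iterator callback (hypothetical; not part of this
 * file) that stops as soon as it sees a dirty buffer:
 *
 *	static enum it_action stop_on_dirty(struct dm_buffer *b, void *context)
 *	{
 *		return test_bit(B_DIRTY, &b->state) ? IT_COMPLETE : IT_NEXT;
 *	}
 *
 *	cache_iterate(bc, LIST_CLEAN, stop_on_dirty, NULL);
 */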

/*--------------*/

/*
 * Passes ownership of the buffer to the cache. Returns false if the
 * buffer was already present (in which case ownership does not pass).
 * eg, a race with another thread.
 *
 * Holder count should be 1 on insertion.
 *
 * Not threadsafe.
 */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block)
			return false;

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);

	return true;
}

static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
		return false;

	cache_write_lock(bc, b->block);
	BUG_ON(atomic_read(&b->hold_count) != 1);
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

/*
 * Removes buffer from cache, ownership of the buffer passes back to the caller.
 * Fails if the hold_count is not one (ie. the caller holds the only reference).
 *
 * Not threadsafe.
 */
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

typedef void (*b_release)(struct dm_buffer *);

static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	while (true) {
		cond_resched();

		b = __find_next(root, begin);
		if (!b || (b->block >= end))
			break;

		begin = b->block + 1;

		if (atomic_read(&b->hold_count))
			continue;

		if (pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}

static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}

/*----------------------------------------------------------------*/

/*
 * Linking of buffers:
 *	All buffers are linked to the buffer cache with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	lru[LIST_DIRTY] too. They are later added to the clean lru in the
 *	process context.
 */
struct dm_bufio_client {
	struct block_device *bdev;
	unsigned int block_size;
	s8 sectors_per_block_bits;

	bool no_sleep;
	struct mutex lock;
	spinlock_t spinlock;

	int async_write_error;

	void (*alloc_callback)(struct dm_buffer *buf);
	void (*write_callback)(struct dm_buffer *buf);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned int need_reserved_buffers;

	unsigned int minimum_buffers;

	sector_t start;

	struct shrinker *shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;

	wait_queue_head_t free_buffer_wait;

	struct list_head client_list;

	/*
	 * Used by global_cleanup to sort the clients list.
	 */
	unsigned long oldest_buffer;

	struct dm_buffer_cache cache; /* must be last member */
};
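
/*
 * Because 'cache' ends in the flexible array trees[], it must stay the
 * last member and the client must be allocated with room for all the
 * trees. A sketch of the allocation (the actual call lives in
 * dm_bufio_client_create(), outside this excerpt):
 *
 *	c = kzalloc(sizeof(*c) + num_locks * sizeof(struct buffer_tree),
 *		    GFP_KERNEL);
 */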

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	if (!unlink) {
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
		return;
	if (WARN_ON(dm_bufio_client_count < 0))
		return;

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order > MAX_PAGE_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	return __vmalloc(c->block_size, gfp_mask);
}
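
/*
 * To illustrate the policy above (the numbers are examples, not taken
 * from this file): a 512-byte block comes from c->slab_cache; a 64KiB
 * block requested with __GFP_NORETRY comes from __get_free_pages();
 * the same 64KiB block requested without __GFP_NORETRY (the reserve
 * path) comes from __vmalloc().
 */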

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}
	adjust_total_allocated(b, false);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b, true);
	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset)
{
	struct bio *bio;
	char *ptr;
	unsigned int len;

	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	__bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}
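
/*
 * Worked example: with a 4KiB block size, sectors_per_block_bits is 3,
 * so block 10 maps to sector (10 << 3) + c->start = 80 + c->start.
 * Non-power-of-two block sizes take the multiply path instead
 * (sectors_per_block_bits is negative in that case).
 */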

static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned int n_sectors;
	sector_t sector;
	unsigned int offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}
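
/*
 * The write path above rounds the dirty range out to
 * DM_BUFIO_WRITE_ALIGN boundaries: 'offset &= -4096' rounds down, and
 * adding 4095 before masking rounds 'end' up. For example, a dirty
 * range of [100, 5000) within a 16KiB block is written as [0, 8192).
 */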

/*
 *--------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------
 */

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}
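
/*
 * The barriers around clear_bit() above order the write_error update
 * and the bit clear against the waiters: wait_on_bit_io(..., B_WRITING,
 * ...) in __write_dirty_buffer()/__make_buffer_clean() must not observe
 * a stale B_WRITING after being woken.
 */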

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(atomic_read(&b->hold_count));

	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

static enum evict_result is_clean(struct dm_buffer *b, void *context)
{
	struct dm_bufio_client *c = context;

	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
		return ER_DONT_EVICT;

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
	    unlikely(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;

	return ER_EVICT;
}

static enum evict_result is_dirty(struct dm_buffer *b, void *context)
{
	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
		return ER_DONT_EVICT;

	return ER_EVICT;
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
	if (b) {
		/* this also waits for pending reads */
		__make_buffer_clean(b);
		return b;
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
	if (b) {
		__make_buffer_clean(b);
		return b;
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	/*
	 * It's possible to miss a wake up event since we don't always
	 * hold c->lock when wake_up is called. So we have a timeout here,
	 * just in case.
	 */
	io_schedule_timeout(5 * HZ);

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_to_buffer(c->reserved_buffers.next);
			list_del(&b->lru.list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	b->block = -1;
	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru.list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	/*
	 * We hold the bufio lock here, so no one can add entries to the
	 * wait queue anyway.
	 */
	if (unlikely(waitqueue_active(&c->free_buffer_wait)))
		wake_up(&c->free_buffer_wait);
}

static enum evict_result cleaned(struct dm_buffer *b, void *context)
{
	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT; /* should never happen */

	if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
		return ER_DONT_EVICT;
	else
		return ER_EVICT;
}

static void __move_clean_buffers(struct dm_bufio_client *c)
{
	cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
}

struct write_context {
	int no_wait;
	struct list_head *write_list;
};

static enum it_action write_one(struct dm_buffer *b, void *context)
{
	struct write_context *wc = context;

	if (wc->no_wait && test_bit(B_WRITING, &b->state))
1709 | return IT_COMPLETE; | |
1710 | ||
1711 | __write_dirty_buffer(b, wc->write_list); | |
1712 | return IT_NEXT; | |
1713 | } | |
1714 | ||
1715 | static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, | |
1716 | struct list_head *write_list) | |
1717 | { | |
1718 | struct write_context wc = {.no_wait = no_wait, .write_list = write_list}; | |
1719 | ||
1720 | __move_clean_buffers(c); | |
1721 | cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); | |
95d402f0 MP |
1722 | } |
1723 | ||
95d402f0 MP |
1724 | /* |
1725 | * Check if we're over the writeback watermark: if the number of | 
1726 | * dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the number | 
1727 | * of clean buffers, start writing dirty buffers back asynchronously. | 
1728 | */ | |
2480945c MP |
1729 | static void __check_watermark(struct dm_bufio_client *c, |
1730 | struct list_head *write_list) | |
95d402f0 | 1731 | { |
450e8dee JT |
1732 | if (cache_count(&c->cache, LIST_DIRTY) > |
1733 | cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) | |
2480945c | 1734 | __write_dirty_buffers_async(c, 1, write_list); |
95d402f0 MP |
1735 | } |
1736 | ||
a4a82ce3 HM |
1737 | /* |
1738 | *-------------------------------------------------------------- | |
95d402f0 | 1739 | * Getting a buffer |
a4a82ce3 HM |
1740 | *-------------------------------------------------------------- |
1741 | */ | |
95d402f0 | 1742 | |
450e8dee JT |
1743 | static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) |
1744 | { | |
1745 | /* | |
1746 | * Relying on waitqueue_active() is racy, but the waiter sleeps | 
1747 | * with a timeout anyway (see __wait_for_free_buffer). | 
1748 | */ | |
1749 | if (cache_put(&c->cache, b) && | |
1750 | unlikely(waitqueue_active(&c->free_buffer_wait))) | |
1751 | wake_up(&c->free_buffer_wait); | |
1752 | } | |
1753 | ||
1754 | /* | |
1755 | * This assumes you have already checked the cache to see if the buffer | |
1756 | * is already present (it will recheck after dropping the lock for allocation). | |
1757 | */ | |
95d402f0 | 1758 | static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, |
2480945c MP |
1759 | enum new_flag nf, int *need_submit, |
1760 | struct list_head *write_list) | |
95d402f0 MP |
1761 | { |
1762 | struct dm_buffer *b, *new_b = NULL; | |
1763 | ||
1764 | *need_submit = 0; | |
1765 | ||
450e8dee JT |
1766 | /* This can't be called with NF_GET */ |
1767 | if (WARN_ON_ONCE(nf == NF_GET)) | |
95d402f0 MP |
1768 | return NULL; |
1769 | ||
a66cc28f MP |
1770 | new_b = __alloc_buffer_wait(c, nf); |
1771 | if (!new_b) | |
1772 | return NULL; | |
95d402f0 MP |
1773 | |
1774 | /* | |
1775 | * We've had a period where the mutex was unlocked, so need to | |
ef992373 | 1776 | * recheck the buffer tree. |
95d402f0 | 1777 | */ |
450e8dee | 1778 | b = cache_get(&c->cache, block); |
95d402f0 MP |
1779 | if (b) { |
1780 | __free_buffer_wake(new_b); | |
a66cc28f | 1781 | goto found_buffer; |
95d402f0 MP |
1782 | } |
1783 | ||
2480945c | 1784 | __check_watermark(c, write_list); |
95d402f0 MP |
1785 | |
1786 | b = new_b; | |
450e8dee JT |
1787 | atomic_set(&b->hold_count, 1); |
1788 | WRITE_ONCE(b->last_accessed, jiffies); | |
1789 | b->block = block; | |
95d402f0 MP |
1790 | b->read_error = 0; |
1791 | b->write_error = 0; | |
450e8dee | 1792 | b->list_mode = LIST_CLEAN; |
95d402f0 | 1793 | |
450e8dee | 1794 | if (nf == NF_FRESH) |
95d402f0 | 1795 | b->state = 0; |
450e8dee JT |
1796 | else { |
1797 | b->state = 1 << B_READING; | |
1798 | *need_submit = 1; | |
95d402f0 MP |
1799 | } |
1800 | ||
450e8dee JT |
1801 | /* |
1802 | * We mustn't insert into the cache until the B_READING state | |
1803 | * is set. Otherwise another thread could get it and use | |
1804 | * it before it has been read. | 
1805 | */ | |
1806 | cache_insert(&c->cache, b); | |
95d402f0 MP |
1807 | |
1808 | return b; | |
a66cc28f MP |
1809 | |
1810 | found_buffer: | |
450e8dee JT |
1811 | if (nf == NF_PREFETCH) { |
1812 | cache_put_and_wake(c, b); | |
a66cc28f | 1813 | return NULL; |
450e8dee JT |
1814 | } |
1815 | ||
a66cc28f MP |
1816 | /* |
1817 | * Note: it is essential that we don't wait for the buffer to be | |
1818 | * read if the dm_bufio_get function is used. Both dm_bufio_get and | 
1819 | * dm_bufio_prefetch can be used in the driver request routine. | |
1820 | * If the user called both dm_bufio_prefetch and dm_bufio_get on | |
1821 | * the same buffer, it would deadlock if we waited. | |
1822 | */ | |
450e8dee JT |
1823 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { |
1824 | cache_put_and_wake(c, b); | |
a66cc28f | 1825 | return NULL; |
450e8dee | 1826 | } |
a66cc28f | 1827 | |
a66cc28f | 1828 | return b; |
95d402f0 MP |
1829 | } |
1830 | ||
1831 | /* | |
1832 | * The endio routine for reading: set the error, clear the bit and wake up | |
1833 | * anyone waiting on the buffer. | |
1834 | */ | |
45354f1e | 1835 | static void read_endio(struct dm_buffer *b, blk_status_t status) |
95d402f0 | 1836 | { |
45354f1e | 1837 | b->read_error = status; |
95d402f0 MP |
1838 | |
1839 | BUG_ON(!test_bit(B_READING, &b->state)); | |
1840 | ||
4e857c58 | 1841 | smp_mb__before_atomic(); |
95d402f0 | 1842 | clear_bit(B_READING, &b->state); |
4e857c58 | 1843 | smp_mb__after_atomic(); |
95d402f0 MP |
1844 | |
1845 | wake_up_bit(&b->state, B_READING); | |
1846 | } | |
1847 | ||
1848 | /* | |
1849 | * A common routine for dm_bufio_new and dm_bufio_read. The operation of | 
1850 | * these functions is similar, except that dm_bufio_new doesn't read the | 
1851 | * buffer from the disk (it assumes that the caller will overwrite all the | 
1852 | * data and use dm_bufio_mark_buffer_dirty to write the new data back). | 
1853 | */ | |
1854 | static void *new_read(struct dm_bufio_client *c, sector_t block, | |
1855 | enum new_flag nf, struct dm_buffer **bp) | |
1856 | { | |
450e8dee | 1857 | int need_submit = 0; |
95d402f0 MP |
1858 | struct dm_buffer *b; |
1859 | ||
2480945c MP |
1860 | LIST_HEAD(write_list); |
1861 | ||
450e8dee JT |
1862 | *bp = NULL; |
1863 | ||
1864 | /* | |
1865 | * Fast path, hopefully the block is already in the cache. No need | |
1866 | * to get the client lock for this. | |
1867 | */ | |
1868 | b = cache_get(&c->cache, block); | |
1869 | if (b) { | |
1870 | if (nf == NF_PREFETCH) { | |
1871 | cache_put_and_wake(c, b); | |
1872 | return NULL; | |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Note: it is essential that we don't wait for the buffer to be | |
1877 | * read if the dm_bufio_get function is used. Both dm_bufio_get and | 
1878 | * dm_bufio_prefetch can be used in the driver request routine. | |
1879 | * If the user called both dm_bufio_prefetch and dm_bufio_get on | |
1880 | * the same buffer, it would deadlock if we waited. | |
1881 | */ | |
1882 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { | |
1883 | cache_put_and_wake(c, b); | |
1884 | return NULL; | |
1885 | } | |
1886 | } | |
1887 | ||
1888 | if (!b) { | |
1889 | if (nf == NF_GET) | |
1890 | return NULL; | |
1891 | ||
1892 | dm_bufio_lock(c); | |
1893 | b = __bufio_new(c, block, nf, &need_submit, &write_list); | |
1894 | dm_bufio_unlock(c); | |
1895 | } | |
1896 | ||
86bad0c7 | 1897 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
450e8dee | 1898 | if (b && (atomic_read(&b->hold_count) == 1)) |
86bad0c7 MP |
1899 | buffer_record_stack(b); |
1900 | #endif | |
95d402f0 | 1901 | |
2480945c MP |
1902 | __flush_write_list(&write_list); |
1903 | ||
a66cc28f | 1904 | if (!b) |
f98c8f79 | 1905 | return NULL; |
95d402f0 MP |
1906 | |
1907 | if (need_submit) | |
905be0a1 | 1908 | submit_io(b, REQ_OP_READ, read_endio); |
95d402f0 | 1909 | |
2a695062 MP |
1910 | if (nf != NF_GET) /* we already tested this condition above */ |
1911 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); | |
95d402f0 MP |
1912 | |
1913 | if (b->read_error) { | |
4e4cbee9 | 1914 | int error = blk_status_to_errno(b->read_error); |
95d402f0 MP |
1915 | |
1916 | dm_bufio_release(b); | |
1917 | ||
1918 | return ERR_PTR(error); | |
1919 | } | |
1920 | ||
1921 | *bp = b; | |
1922 | ||
1923 | return b->data; | |
1924 | } | |
1925 | ||
1926 | void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, | |
1927 | struct dm_buffer **bp) | |
1928 | { | |
1929 | return new_read(c, block, NF_GET, bp); | |
1930 | } | |
1931 | EXPORT_SYMBOL_GPL(dm_bufio_get); | |
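A minimal usage sketch (not part of this file; the helper name is illustrative): dm_bufio_get() never performs I/O, so it is the lookup to use where sleeping on a read is not allowed.

#include <linux/dm-bufio.h>
#include <linux/err.h>

/* Sketch: try a cached block; NULL means "not cached, read it later". */
static void *example_try_cached(struct dm_bufio_client *c, sector_t block,
				struct dm_buffer **bp)
{
	void *data = dm_bufio_get(c, block, bp);

	if (!data || IS_ERR(data))
		return NULL;	/* not cached, or a previous read failed */

	return data;		/* caller must dm_bufio_release(*bp) later */
}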
1932 | ||
1933 | void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, | |
1934 | struct dm_buffer **bp) | |
1935 | { | |
05112287 MS |
1936 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1937 | return ERR_PTR(-EINVAL); | |
95d402f0 MP |
1938 | |
1939 | return new_read(c, block, NF_READ, bp); | |
1940 | } | |
1941 | EXPORT_SYMBOL_GPL(dm_bufio_read); | |
1942 | ||
1943 | void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, | |
1944 | struct dm_buffer **bp) | |
1945 | { | |
05112287 MS |
1946 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1947 | return ERR_PTR(-EINVAL); | |
95d402f0 MP |
1948 | |
1949 | return new_read(c, block, NF_FRESH, bp); | |
1950 | } | |
1951 | EXPORT_SYMBOL_GPL(dm_bufio_new); | |
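A hedged sketch of the basic read path (not from the source; the client "c" and block number are assumptions): dm_bufio_read() waits for the read, returns an ERR_PTR() on I/O error, and leaves the buffer held until it is released.

#include <linux/dm-bufio.h>
#include <linux/err.h>

static int example_read_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read(c, block, &b);	/* reads and waits if not cached */
	if (IS_ERR(data))
		return PTR_ERR(data);		/* I/O error from the device */

	/* ... inspect dm_bufio_get_block_size(c) bytes at "data" ... */

	dm_bufio_release(b);			/* drop our hold on the buffer */
	return 0;
}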
1952 | ||
a66cc28f | 1953 | void dm_bufio_prefetch(struct dm_bufio_client *c, |
86a3238c | 1954 | sector_t block, unsigned int n_blocks) |
a66cc28f MP |
1955 | { |
1956 | struct blk_plug plug; | |
1957 | ||
2480945c MP |
1958 | LIST_HEAD(write_list); |
1959 | ||
05112287 MS |
1960 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1961 | return; /* should never happen */ | |
3b6b7813 | 1962 | |
a66cc28f | 1963 | blk_start_plug(&plug); |
a66cc28f MP |
1964 | |
1965 | for (; n_blocks--; block++) { | |
1966 | int need_submit; | |
1967 | struct dm_buffer *b; | |
0ef0b471 | 1968 | |
450e8dee JT |
1969 | b = cache_get(&c->cache, block); |
1970 | if (b) { | |
1971 | /* already in cache */ | |
1972 | cache_put_and_wake(c, b); | |
1973 | continue; | |
1974 | } | |
1975 | ||
1976 | dm_bufio_lock(c); | |
2480945c MP |
1977 | b = __bufio_new(c, block, NF_PREFETCH, &need_submit, |
1978 | &write_list); | |
1979 | if (unlikely(!list_empty(&write_list))) { | |
1980 | dm_bufio_unlock(c); | |
1981 | blk_finish_plug(&plug); | |
1982 | __flush_write_list(&write_list); | |
1983 | blk_start_plug(&plug); | |
1984 | dm_bufio_lock(c); | |
1985 | } | |
a66cc28f MP |
1986 | if (unlikely(b != NULL)) { |
1987 | dm_bufio_unlock(c); | |
1988 | ||
1989 | if (need_submit) | |
905be0a1 | 1990 | submit_io(b, REQ_OP_READ, read_endio); |
a66cc28f MP |
1991 | dm_bufio_release(b); |
1992 | ||
7cd32674 | 1993 | cond_resched(); |
a66cc28f MP |
1994 | |
1995 | if (!n_blocks) | |
1996 | goto flush_plug; | |
1997 | dm_bufio_lock(c); | |
1998 | } | |
450e8dee | 1999 | dm_bufio_unlock(c); |
a66cc28f MP |
2000 | } |
2001 | ||
a66cc28f MP |
2002 | flush_plug: |
2003 | blk_finish_plug(&plug); | |
2004 | } | |
2005 | EXPORT_SYMBOL_GPL(dm_bufio_prefetch); | |
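A sketch of the intended prefetch pattern (illustrative, assuming client "c" and an arbitrary range of 16 blocks): dm_bufio_prefetch() only starts asynchronous reads; a later dm_bufio_read() waits for each one.

#include <linux/dm-bufio.h>
#include <linux/err.h>

static void example_prefetch_range(struct dm_bufio_client *c, sector_t first)
{
	struct dm_buffer *b;
	void *data;
	unsigned int i;

	dm_bufio_prefetch(c, first, 16);	/* async; best effort */

	for (i = 0; i < 16; i++) {
		data = dm_bufio_read(c, first + i, &b);	/* waits if needed */
		if (IS_ERR(data))
			continue;		/* skip blocks that failed */
		/* ... use data ... */
		dm_bufio_release(b);
	}
}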
2006 | ||
95d402f0 MP |
2007 | void dm_bufio_release(struct dm_buffer *b) |
2008 | { | |
2009 | struct dm_bufio_client *c = b->c; | |
2010 | ||
450e8dee JT |
2011 | /* |
2012 | * If there were errors on the buffer, and the buffer is not | |
2013 | * to be written, free the buffer. There is no point in caching | 
2014 | * an invalid buffer. | 
2015 | */ | |
2016 | if ((b->read_error || b->write_error) && | |
2017 | !test_bit_acquire(B_READING, &b->state) && | |
2018 | !test_bit(B_WRITING, &b->state) && | |
2019 | !test_bit(B_DIRTY, &b->state)) { | |
2020 | dm_bufio_lock(c); | |
95d402f0 | 2021 | |
450e8dee JT |
2022 | /* cache remove can fail if there are other holders */ |
2023 | if (cache_remove(&c->cache, b)) { | |
95d402f0 | 2024 | __free_buffer_wake(b); |
450e8dee JT |
2025 | dm_bufio_unlock(c); |
2026 | return; | |
95d402f0 | 2027 | } |
450e8dee JT |
2028 | |
2029 | dm_bufio_unlock(c); | |
95d402f0 MP |
2030 | } |
2031 | ||
450e8dee | 2032 | cache_put_and_wake(c, b); |
95d402f0 MP |
2033 | } |
2034 | EXPORT_SYMBOL_GPL(dm_bufio_release); | |
2035 | ||
1e3b21c6 | 2036 | void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, |
86a3238c | 2037 | unsigned int start, unsigned int end) |
95d402f0 MP |
2038 | { |
2039 | struct dm_bufio_client *c = b->c; | |
2040 | ||
1e3b21c6 MP |
2041 | BUG_ON(start >= end); |
2042 | BUG_ON(end > b->c->block_size); | |
2043 | ||
95d402f0 MP |
2044 | dm_bufio_lock(c); |
2045 | ||
a66cc28f MP |
2046 | BUG_ON(test_bit(B_READING, &b->state)); |
2047 | ||
1e3b21c6 MP |
2048 | if (!test_and_set_bit(B_DIRTY, &b->state)) { |
2049 | b->dirty_start = start; | |
2050 | b->dirty_end = end; | |
450e8dee | 2051 | cache_mark(&c->cache, b, LIST_DIRTY); |
1e3b21c6 MP |
2052 | } else { |
2053 | if (start < b->dirty_start) | |
2054 | b->dirty_start = start; | |
2055 | if (end > b->dirty_end) | |
2056 | b->dirty_end = end; | |
2057 | } | |
95d402f0 MP |
2058 | |
2059 | dm_bufio_unlock(c); | |
2060 | } | |
1e3b21c6 MP |
2061 | EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); |
2062 | ||
2063 | void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) | |
2064 | { | |
2065 | dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); | |
2066 | } | |
95d402f0 MP |
2067 | EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); |
2068 | ||
2069 | void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) | |
2070 | { | |
2480945c MP |
2071 | LIST_HEAD(write_list); |
2072 | ||
05112287 MS |
2073 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2074 | return; /* should never happen */ | |
95d402f0 MP |
2075 | |
2076 | dm_bufio_lock(c); | |
2480945c | 2077 | __write_dirty_buffers_async(c, 0, &write_list); |
95d402f0 | 2078 | dm_bufio_unlock(c); |
2480945c | 2079 | __flush_write_list(&write_list); |
95d402f0 MP |
2080 | } |
2081 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); | |
2082 | ||
2083 | /* | |
2084 | * For performance, it is essential that the buffers are written asynchronously | |
2085 | * and simultaneously (so that the block layer can merge the writes) and then | |
2086 | * waited upon. | |
2087 | * | |
2088 | * Finally, we flush the hardware disk cache. | 
2089 | */ | |
450e8dee JT |
2090 | static bool is_writing(struct lru_entry *e, void *context) |
2091 | { | |
2092 | struct dm_buffer *b = le_to_buffer(e); | |
2093 | ||
2094 | return test_bit(B_WRITING, &b->state); | |
2095 | } | |
2096 | ||
95d402f0 MP |
2097 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) |
2098 | { | |
edc11d49 | 2099 | int a, f; |
450e8dee JT |
2100 | unsigned long nr_buffers; |
2101 | struct lru_entry *e; | |
2102 | struct lru_iter it; | |
95d402f0 | 2103 | |
2480945c MP |
2104 | LIST_HEAD(write_list); |
2105 | ||
2106 | dm_bufio_lock(c); | |
2107 | __write_dirty_buffers_async(c, 0, &write_list); | |
2108 | dm_bufio_unlock(c); | |
2109 | __flush_write_list(&write_list); | |
95d402f0 | 2110 | dm_bufio_lock(c); |
95d402f0 | 2111 | |
450e8dee JT |
2112 | nr_buffers = cache_count(&c->cache, LIST_DIRTY); |
2113 | lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); | |
2114 | while ((e = lru_iter_next(&it, is_writing, c))) { | |
2115 | struct dm_buffer *b = le_to_buffer(e); | |
2116 | __cache_inc_buffer(b); | |
95d402f0 MP |
2117 | |
2118 | BUG_ON(test_bit(B_READING, &b->state)); | |
2119 | ||
450e8dee JT |
2120 | if (nr_buffers) { |
2121 | nr_buffers--; | |
2122 | dm_bufio_unlock(c); | |
2123 | wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); | |
2124 | dm_bufio_lock(c); | |
2125 | } else { | |
2126 | wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); | |
95d402f0 MP |
2127 | } |
2128 | ||
450e8dee JT |
2129 | if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) |
2130 | cache_mark(&c->cache, b, LIST_CLEAN); | |
95d402f0 | 2131 | |
450e8dee | 2132 | cache_put_and_wake(c, b); |
95d402f0 | 2133 | |
450e8dee | 2134 | cond_resched(); |
95d402f0 | 2135 | } |
450e8dee JT |
2136 | lru_iter_end(&it); |
2137 | ||
95d402f0 MP |
2138 | wake_up(&c->free_buffer_wait); |
2139 | dm_bufio_unlock(c); | |
2140 | ||
2141 | a = xchg(&c->async_write_error, 0); | |
2142 | f = dm_bufio_issue_flush(c); | |
2143 | if (a) | |
2144 | return a; | |
2145 | ||
2146 | return f; | |
2147 | } | |
2148 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); | |
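A sketch of the update path (illustrative; the client "c", block number, and 64-byte range are assumptions, and the range must lie within the block size): modify the data in place, mark only the modified byte range dirty, then flush all dirty buffers with a single call that also issues the disk-cache flush.

#include <linux/dm-bufio.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_update_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read(c, block, &b);
	if (IS_ERR(data))
		return PTR_ERR(data);

	memset(data, 0, 64);				/* modify bytes 0..63 */
	dm_bufio_mark_partial_buffer_dirty(b, 0, 64);	/* only that range */
	dm_bufio_release(b);

	/* write all dirty buffers, wait, and flush; returns the first error */
	return dm_bufio_write_dirty_buffers(c);
}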
2149 | ||
2150 | /* | |
ef992373 | 2151 | * Use dm-io to send an empty barrier to flush the device. |
95d402f0 MP |
2152 | */ |
2153 | int dm_bufio_issue_flush(struct dm_bufio_client *c) | |
2154 | { | |
2155 | struct dm_io_request io_req = { | |
581075e4 | 2156 | .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, |
95d402f0 MP |
2157 | .mem.type = DM_IO_KMEM, |
2158 | .mem.ptr.addr = NULL, | |
2159 | .client = c->dm_io, | |
2160 | }; | |
2161 | struct dm_io_region io_reg = { | |
2162 | .bdev = c->bdev, | |
2163 | .sector = 0, | |
2164 | .count = 0, | |
2165 | }; | |
2166 | ||
05112287 MS |
2167 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2168 | return -EINVAL; | |
95d402f0 MP |
2169 | |
2170 | return dm_io(&io_req, 1, &io_reg, NULL); | |
2171 | } | |
2172 | EXPORT_SYMBOL_GPL(dm_bufio_issue_flush); | |
2173 | ||
6fbeb004 MP |
2174 | /* |
2175 | * Use dm-io to send a discard request to the device. | 
2176 | */ | |
2177 | int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) | |
2178 | { | |
2179 | struct dm_io_request io_req = { | |
581075e4 | 2180 | .bi_opf = REQ_OP_DISCARD | REQ_SYNC, |
6fbeb004 MP |
2181 | .mem.type = DM_IO_KMEM, |
2182 | .mem.ptr.addr = NULL, | |
2183 | .client = c->dm_io, | |
2184 | }; | |
2185 | struct dm_io_region io_reg = { | |
2186 | .bdev = c->bdev, | |
2187 | .sector = block_to_sector(c, block), | |
2188 | .count = block_to_sector(c, count), | |
2189 | }; | |
2190 | ||
05112287 MS |
2191 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2192 | return -EINVAL; /* discards are optional */ | |
6fbeb004 MP |
2193 | |
2194 | return dm_io(&io_req, 1, &io_reg, NULL); | |
2195 | } | |
2196 | EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); | |
2197 | ||
450e8dee | 2198 | static bool forget_buffer(struct dm_bufio_client *c, sector_t block) |
33a18062 | 2199 | { |
450e8dee JT |
2200 | struct dm_buffer *b; |
2201 | ||
2202 | b = cache_get(&c->cache, block); | |
2203 | if (b) { | |
2204 | if (likely(!smp_load_acquire(&b->state))) { | |
2205 | if (cache_remove(&c->cache, b)) | |
2206 | __free_buffer_wake(b); | |
2207 | else | |
2208 | cache_put_and_wake(c, b); | |
2209 | } else { | |
2210 | cache_put_and_wake(c, b); | |
2211 | } | |
33a18062 | 2212 | } |
450e8dee JT |
2213 | |
2214 | return b != NULL; | 
33a18062 MP |
2215 | } |
2216 | ||
55494bf2 MP |
2217 | /* |
2218 | * Free the given buffer. | |
2219 | * | |
2220 | * This is just a hint; if the buffer is in use or dirty, this function | 
2221 | * does nothing. | |
2222 | */ | |
2223 | void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) | |
2224 | { | |
55494bf2 | 2225 | dm_bufio_lock(c); |
450e8dee | 2226 | forget_buffer(c, block); |
55494bf2 MP |
2227 | dm_bufio_unlock(c); |
2228 | } | |
afa53df8 | 2229 | EXPORT_SYMBOL_GPL(dm_bufio_forget); |
55494bf2 | 2230 | |
450e8dee | 2231 | static enum evict_result idle(struct dm_buffer *b, void *context) |
33a18062 | 2232 | { |
450e8dee JT |
2233 | return b->state ? ER_DONT_EVICT : ER_EVICT; |
2234 | } | |
33a18062 | 2235 | |
450e8dee JT |
2236 | void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) |
2237 | { | |
2238 | dm_bufio_lock(c); | |
2239 | cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); | |
2240 | dm_bufio_unlock(c); | |
33a18062 MP |
2241 | } |
2242 | EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers); | |
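A sketch combining the two hints above (illustrative): after discarding a range on the device, drop any cached copies so stale data is not served from the cache; buffers that are in use or dirty are simply left alone.

#include <linux/dm-bufio.h>

static void example_invalidate_range(struct dm_bufio_client *c,
				     sector_t block, sector_t n_blocks)
{
	/* best effort: discards are optional and may fail */
	(void)dm_bufio_issue_discard(c, block, n_blocks);

	/* drop idle cached buffers in [block, block + n_blocks) */
	dm_bufio_forget_buffers(c, block, n_blocks);
}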
2243 | ||
86a3238c | 2244 | void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n) |
55b082e6 MP |
2245 | { |
2246 | c->minimum_buffers = n; | |
2247 | } | |
afa53df8 | 2248 | EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers); |
55b082e6 | 2249 | |
86a3238c | 2250 | unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c) |
95d402f0 MP |
2251 | { |
2252 | return c->block_size; | |
2253 | } | |
2254 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); | |
2255 | ||
2256 | sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) | |
2257 | { | |
6dcbb52c | 2258 | sector_t s = bdev_nr_sectors(c->bdev); |
0ef0b471 | 2259 | |
a14e5ec6 MP |
2260 | if (s >= c->start) |
2261 | s -= c->start; | |
2262 | else | |
2263 | s = 0; | |
f51f2e0a MP |
2264 | if (likely(c->sectors_per_block_bits >= 0)) |
2265 | s >>= c->sectors_per_block_bits; | |
2266 | else | |
2267 | sector_div(s, c->block_size >> SECTOR_SHIFT); | |
2268 | return s; | |
95d402f0 MP |
2269 | } |
2270 | EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); | |
2271 | ||
9b594826 MP |
2272 | struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) |
2273 | { | |
2274 | return c->dm_io; | |
2275 | } | |
2276 | EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); | |
2277 | ||
95d402f0 MP |
2278 | sector_t dm_bufio_get_block_number(struct dm_buffer *b) |
2279 | { | |
2280 | return b->block; | |
2281 | } | |
2282 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_number); | |
2283 | ||
2284 | void *dm_bufio_get_block_data(struct dm_buffer *b) | |
2285 | { | |
2286 | return b->data; | |
2287 | } | |
2288 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_data); | |
2289 | ||
2290 | void *dm_bufio_get_aux_data(struct dm_buffer *b) | |
2291 | { | |
2292 | return b + 1; | |
2293 | } | |
2294 | EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data); | |
2295 | ||
2296 | struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) | |
2297 | { | |
2298 | return b->c; | |
2299 | } | |
2300 | EXPORT_SYMBOL_GPL(dm_bufio_get_client); | |
2301 | ||
450e8dee JT |
2302 | static enum it_action warn_leak(struct dm_buffer *b, void *context) |
2303 | { | |
2304 | bool *warned = context; | |
2305 | ||
2306 | WARN_ON(!(*warned)); | |
2307 | *warned = true; | |
2308 | DMERR("leaked buffer %llx, hold count %u, list %d", | |
2309 | (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); | |
2310 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING | |
2311 | stack_trace_print(b->stack_entries, b->stack_len, 1); | |
2312 | /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */ | |
2313 | atomic_set(&b->hold_count, 0); | |
2314 | #endif | |
2315 | return IT_NEXT; | |
2316 | } | |
2317 | ||
95d402f0 MP |
2318 | static void drop_buffers(struct dm_bufio_client *c) |
2319 | { | |
95d402f0 | 2320 | int i; |
450e8dee | 2321 | struct dm_buffer *b; |
95d402f0 | 2322 | |
b75a80f4 MS |
2323 | if (WARN_ON(dm_bufio_in_request())) |
2324 | return; /* should never happen */ | |
95d402f0 MP |
2325 | |
2326 | /* | |
2327 | * An optimization so that the buffers are not written one-by-one. | |
2328 | */ | |
2329 | dm_bufio_write_dirty_buffers_async(c); | |
2330 | ||
2331 | dm_bufio_lock(c); | |
2332 | ||
2333 | while ((b = __get_unclaimed_buffer(c))) | |
2334 | __free_buffer_wake(b); | |
2335 | ||
450e8dee JT |
2336 | for (i = 0; i < LIST_SIZE; i++) { |
2337 | bool warned = false; | |
2338 | ||
2339 | cache_iterate(&c->cache, i, warn_leak, &warned); | |
2340 | } | |
86bad0c7 MP |
2341 | |
2342 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING | |
2343 | while ((b = __get_unclaimed_buffer(c))) | |
2344 | __free_buffer_wake(b); | |
2345 | #endif | |
95d402f0 MP |
2346 | |
2347 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee | 2348 | WARN_ON(cache_count(&c->cache, i)); |
95d402f0 MP |
2349 | |
2350 | dm_bufio_unlock(c); | |
2351 | } | |
2352 | ||
13840d38 | 2353 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
33096a78 | 2354 | { |
f51f2e0a | 2355 | unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); |
0ef0b471 | 2356 | |
f51f2e0a MP |
2357 | if (likely(c->sectors_per_block_bits >= 0)) |
2358 | retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; | |
2359 | else | |
2360 | retain_bytes /= c->block_size; | |
0ef0b471 | 2361 | |
f51f2e0a | 2362 | return retain_bytes; |
33096a78 JT |
2363 | } |
2364 | ||
70704c33 | 2365 | static void __scan(struct dm_bufio_client *c) |
95d402f0 MP |
2366 | { |
2367 | int l; | |
450e8dee | 2368 | struct dm_buffer *b; |
33096a78 | 2369 | unsigned long freed = 0; |
13840d38 | 2370 | unsigned long retain_target = get_retain_buffers(c); |
450e8dee | 2371 | unsigned long count = cache_total(&c->cache); |
95d402f0 MP |
2372 | |
2373 | for (l = 0; l < LIST_SIZE; l++) { | |
450e8dee | 2374 | while (true) { |
70704c33 MP |
2375 | if (count - freed <= retain_target) |
2376 | atomic_long_set(&c->need_shrink, 0); | |
2377 | if (!atomic_long_read(&c->need_shrink)) | |
450e8dee JT |
2378 | break; |
2379 | ||
2380 | b = cache_evict(&c->cache, l, | |
2381 | l == LIST_CLEAN ? is_clean : is_dirty, c); | |
2382 | if (!b) | |
2383 | break; | |
2384 | ||
2385 | __make_buffer_clean(b); | |
2386 | __free_buffer_wake(b); | |
2387 | ||
2388 | atomic_long_dec(&c->need_shrink); | |
2389 | freed++; | |
7cd32674 | 2390 | cond_resched(); |
7dc19d5a | 2391 | } |
95d402f0 MP |
2392 | } |
2393 | } | |
2394 | ||
70704c33 MP |
2395 | static void shrink_work(struct work_struct *w) |
2396 | { | |
2397 | struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work); | |
2398 | ||
2399 | dm_bufio_lock(c); | |
2400 | __scan(c); | |
2401 | dm_bufio_unlock(c); | |
2402 | } | |
2403 | ||
2404 | static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |
95d402f0 | 2405 | { |
7dc19d5a | 2406 | struct dm_bufio_client *c; |
95d402f0 | 2407 | |
1f1d459c | 2408 | c = shrink->private_data; |
70704c33 MP |
2409 | atomic_long_add(sc->nr_to_scan, &c->need_shrink); |
2410 | queue_work(dm_bufio_wq, &c->shrink_work); | |
95d402f0 | 2411 | |
70704c33 | 2412 | return sc->nr_to_scan; |
7dc19d5a | 2413 | } |
95d402f0 | 2414 | |
70704c33 | 2415 | static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
7dc19d5a | 2416 | { |
1f1d459c | 2417 | struct dm_bufio_client *c = shrink->private_data; |
450e8dee | 2418 | unsigned long count = cache_total(&c->cache); |
fbc7c07e | 2419 | unsigned long retain_target = get_retain_buffers(c); |
70704c33 MP |
2420 | unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); |
2421 | ||
2422 | if (unlikely(count < retain_target)) | |
2423 | count = 0; | |
2424 | else | |
2425 | count -= retain_target; | |
95d402f0 | 2426 | |
70704c33 MP |
2427 | if (unlikely(count < queued_for_cleanup)) |
2428 | count = 0; | |
2429 | else | |
2430 | count -= queued_for_cleanup; | |
2431 | ||
2432 | return count; | |
95d402f0 MP |
2433 | } |
2434 | ||
2435 | /* | |
2436 | * Create the buffering interface | |
2437 | */ | |
86a3238c HM |
2438 | struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size, |
2439 | unsigned int reserved_buffers, unsigned int aux_size, | |
95d402f0 | 2440 | void (*alloc_callback)(struct dm_buffer *), |
0fcb100d NH |
2441 | void (*write_callback)(struct dm_buffer *), |
2442 | unsigned int flags) | |
95d402f0 MP |
2443 | { |
2444 | int r; | |
1e84c4b7 | 2445 | unsigned int num_locks; |
95d402f0 | 2446 | struct dm_bufio_client *c; |
359dbf19 | 2447 | char slab_name[27]; |
95d402f0 | 2448 | |
f51f2e0a MP |
2449 | if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { |
2450 | DMERR("%s: block size not specified or is not multiple of 512b", __func__); | |
2451 | r = -EINVAL; | |
2452 | goto bad_client; | |
2453 | } | |
95d402f0 | 2454 | |
1e84c4b7 MS |
2455 | num_locks = dm_num_hash_locks(); |
2456 | c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); | |
95d402f0 MP |
2457 | if (!c) { |
2458 | r = -ENOMEM; | |
2459 | goto bad_client; | |
2460 | } | |
2a695062 | 2461 | cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); |
95d402f0 MP |
2462 | |
2463 | c->bdev = bdev; | |
2464 | c->block_size = block_size; | |
f51f2e0a MP |
2465 | if (is_power_of_2(block_size)) |
2466 | c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; | |
2467 | else | |
2468 | c->sectors_per_block_bits = -1; | |
95d402f0 | 2469 | |
95d402f0 MP |
2470 | c->alloc_callback = alloc_callback; |
2471 | c->write_callback = write_callback; | |
2472 | ||
3c1c875d | 2473 | if (flags & DM_BUFIO_CLIENT_NO_SLEEP) { |
b32d4582 | 2474 | c->no_sleep = true; |
3c1c875d MS |
2475 | static_branch_inc(&no_sleep_enabled); |
2476 | } | |
b32d4582 | 2477 | |
95d402f0 | 2478 | mutex_init(&c->lock); |
b32d4582 | 2479 | spin_lock_init(&c->spinlock); |
95d402f0 MP |
2480 | INIT_LIST_HEAD(&c->reserved_buffers); |
2481 | c->need_reserved_buffers = reserved_buffers; | |
2482 | ||
afa53df8 | 2483 | dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); |
55b082e6 | 2484 | |
95d402f0 MP |
2485 | init_waitqueue_head(&c->free_buffer_wait); |
2486 | c->async_write_error = 0; | |
2487 | ||
2488 | c->dm_io = dm_io_client_create(); | |
2489 | if (IS_ERR(c->dm_io)) { | |
2490 | r = PTR_ERR(c->dm_io); | |
2491 | goto bad_dm_io; | |
2492 | } | |
2493 | ||
f51f2e0a MP |
2494 | if (block_size <= KMALLOC_MAX_SIZE && |
2495 | (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { | |
86a3238c | 2496 | unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE); |
0ef0b471 | 2497 | |
8d1058fb | 2498 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size); |
f7879b4c | 2499 | c->slab_cache = kmem_cache_create(slab_name, block_size, align, |
6b5e718c | 2500 | SLAB_RECLAIM_ACCOUNT, NULL); |
21bb1327 MP |
2501 | if (!c->slab_cache) { |
2502 | r = -ENOMEM; | |
2503 | goto bad; | |
95d402f0 MP |
2504 | } |
2505 | } | |
359dbf19 | 2506 | if (aux_size) |
8d1058fb | 2507 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size); |
359dbf19 | 2508 | else |
8d1058fb | 2509 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer"); |
359dbf19 MP |
2510 | c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, |
2511 | 0, SLAB_RECLAIM_ACCOUNT, NULL); | |
2512 | if (!c->slab_buffer) { | |
2513 | r = -ENOMEM; | |
2514 | goto bad; | |
2515 | } | |
95d402f0 MP |
2516 | |
2517 | while (c->need_reserved_buffers) { | |
2518 | struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); | |
2519 | ||
2520 | if (!b) { | |
2521 | r = -ENOMEM; | |
0e696d38 | 2522 | goto bad; |
95d402f0 MP |
2523 | } |
2524 | __free_buffer_wake(b); | |
2525 | } | |
2526 | ||
70704c33 MP |
2527 | INIT_WORK(&c->shrink_work, shrink_work); |
2528 | atomic_long_set(&c->need_shrink, 0); | |
2529 | ||
1f1d459c QZ |
2530 | c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)", |
2531 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); | |
2532 | if (!c->shrinker) { | |
2533 | r = -ENOMEM; | |
0e696d38 | 2534 | goto bad; |
1f1d459c QZ |
2535 | } |
2536 | ||
2537 | c->shrinker->count_objects = dm_bufio_shrink_count; | |
2538 | c->shrinker->scan_objects = dm_bufio_shrink_scan; | |
2539 | c->shrinker->seeks = 1; | |
2540 | c->shrinker->batch = 0; | |
2541 | c->shrinker->private_data = c; | |
2542 | ||
2543 | shrinker_register(c->shrinker); | |
46898e9a | 2544 | |
95d402f0 MP |
2545 | mutex_lock(&dm_bufio_clients_lock); |
2546 | dm_bufio_client_count++; | |
2547 | list_add(&c->client_list, &dm_bufio_all_clients); | |
2548 | __cache_size_refresh(); | |
2549 | mutex_unlock(&dm_bufio_clients_lock); | |
2550 | ||
95d402f0 MP |
2551 | return c; |
2552 | ||
0e696d38 | 2553 | bad: |
95d402f0 | 2554 | while (!list_empty(&c->reserved_buffers)) { |
450e8dee JT |
2555 | struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); |
2556 | ||
2557 | list_del(&b->lru.list); | |
95d402f0 MP |
2558 | free_buffer(b); |
2559 | } | |
21bb1327 | 2560 | kmem_cache_destroy(c->slab_cache); |
359dbf19 | 2561 | kmem_cache_destroy(c->slab_buffer); |
95d402f0 MP |
2562 | dm_io_client_destroy(c->dm_io); |
2563 | bad_dm_io: | |
bde14184 | 2564 | mutex_destroy(&c->lock); |
0dfc1f4c ZC |
2565 | if (c->no_sleep) |
2566 | static_branch_dec(&no_sleep_enabled); | |
95d402f0 MP |
2567 | kfree(c); |
2568 | bad_client: | |
2569 | return ERR_PTR(r); | |
2570 | } | |
2571 | EXPORT_SYMBOL_GPL(dm_bufio_client_create); | |
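A sketch of client setup (illustrative; the 4096-byte block size, single reserved buffer, and absence of callbacks are assumptions): the block size must be a multiple of 512 bytes, and the returned pointer must be checked with IS_ERR().

#include <linux/dm-bufio.h>
#include <linux/err.h>

static struct dm_bufio_client *example_create_client(struct block_device *bdev)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096 /* block size */,
				   1 /* reserved buffers */, 0 /* aux_size */,
				   NULL /* alloc_callback */,
				   NULL /* write_callback */, 0 /* flags */);
	if (IS_ERR(c))
		return c;		/* e.g. -EINVAL for a bad block size */

	dm_bufio_set_sector_offset(c, 0);	/* optional; 0 is the default */
	return c;
}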
2572 | ||
2573 | /* | |
2574 | * Free the buffering interface. | |
2575 | * It is required that there are no references on any buffers. | |
2576 | */ | |
2577 | void dm_bufio_client_destroy(struct dm_bufio_client *c) | |
2578 | { | |
86a3238c | 2579 | unsigned int i; |
95d402f0 MP |
2580 | |
2581 | drop_buffers(c); | |
2582 | ||
1f1d459c | 2583 | shrinker_free(c->shrinker); |
70704c33 | 2584 | flush_work(&c->shrink_work); |
95d402f0 MP |
2585 | |
2586 | mutex_lock(&dm_bufio_clients_lock); | |
2587 | ||
2588 | list_del(&c->client_list); | |
2589 | dm_bufio_client_count--; | |
2590 | __cache_size_refresh(); | |
2591 | ||
2592 | mutex_unlock(&dm_bufio_clients_lock); | |
2593 | ||
555977dd | 2594 | WARN_ON(c->need_reserved_buffers); |
95d402f0 MP |
2595 | |
2596 | while (!list_empty(&c->reserved_buffers)) { | |
450e8dee JT |
2597 | struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); |
2598 | ||
2599 | list_del(&b->lru.list); | |
95d402f0 MP |
2600 | free_buffer(b); |
2601 | } | |
2602 | ||
2603 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee JT |
2604 | if (cache_count(&c->cache, i)) |
2605 | DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); | |
95d402f0 MP |
2606 | |
2607 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee | 2608 | WARN_ON(cache_count(&c->cache, i)); |
95d402f0 | 2609 | |
450e8dee | 2610 | cache_destroy(&c->cache); |
21bb1327 | 2611 | kmem_cache_destroy(c->slab_cache); |
359dbf19 | 2612 | kmem_cache_destroy(c->slab_buffer); |
95d402f0 | 2613 | dm_io_client_destroy(c->dm_io); |
bde14184 | 2614 | mutex_destroy(&c->lock); |
3c1c875d MS |
2615 | if (c->no_sleep) |
2616 | static_branch_dec(&no_sleep_enabled); | |
95d402f0 MP |
2617 | kfree(c); |
2618 | } | |
2619 | EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); | |
2620 | ||
d4830012 LL |
2621 | void dm_bufio_client_reset(struct dm_bufio_client *c) |
2622 | { | |
2623 | drop_buffers(c); | |
2624 | flush_work(&c->shrink_work); | |
2625 | } | |
2626 | EXPORT_SYMBOL_GPL(dm_bufio_client_reset); | |
2627 | ||
400a0bef MP |
2628 | void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) |
2629 | { | |
2630 | c->start = start; | |
2631 | } | |
2632 | EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); | |
2633 | ||
450e8dee JT |
2634 | /*--------------------------------------------------------------*/ |
2635 | ||
86a3238c | 2636 | static unsigned int get_max_age_hz(void) |
95d402f0 | 2637 | { |
86a3238c | 2638 | unsigned int max_age = READ_ONCE(dm_bufio_max_age); |
95d402f0 | 2639 | |
33096a78 JT |
2640 | if (max_age > UINT_MAX / HZ) |
2641 | max_age = UINT_MAX / HZ; | |
95d402f0 | 2642 | |
33096a78 JT |
2643 | return max_age * HZ; |
2644 | } | |
95d402f0 | 2645 | |
33096a78 JT |
2646 | static bool older_than(struct dm_buffer *b, unsigned long age_hz) |
2647 | { | |
450e8dee | 2648 | return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); |
33096a78 JT |
2649 | } |
2650 | ||
450e8dee JT |
2651 | struct evict_params { |
2652 | gfp_t gfp; | |
2653 | unsigned long age_hz; | |
2654 | ||
2655 | /* | |
2656 | * This gets updated with the largest last_accessed (i.e. most | 
2657 | * recently used) of the evicted buffers. It will not be reinitialised | |
2658 | * by __evict_many(), so you can use it across multiple invocations. | |
2659 | */ | |
2660 | unsigned long last_accessed; | |
2661 | }; | |
2662 | ||
2663 | /* | |
2664 | * We may not be able to evict this buffer if I/O is pending or the client | 
2665 | * is still using it. | |
2666 | * | |
2667 | * And if GFP_NOFS is used, we must not do any I/O because we hold | |
2668 | * dm_bufio_clients_lock and we would risk deadlock if the I/O gets | |
2669 | * rerouted to a different bufio client. | 
2670 | */ | |
2671 | static enum evict_result select_for_evict(struct dm_buffer *b, void *context) | |
2672 | { | |
2673 | struct evict_params *params = context; | |
2674 | ||
2675 | if (!(params->gfp & __GFP_FS) || | |
2676 | (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { | |
2677 | if (test_bit_acquire(B_READING, &b->state) || | |
2678 | test_bit(B_WRITING, &b->state) || | |
2679 | test_bit(B_DIRTY, &b->state)) | |
2680 | return ER_DONT_EVICT; | |
2681 | } | |
2682 | ||
2683 | return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP; | |
2684 | } | |
2685 | ||
2686 | static unsigned long __evict_many(struct dm_bufio_client *c, | |
2687 | struct evict_params *params, | |
2688 | int list_mode, unsigned long max_count) | |
33096a78 | 2689 | { |
450e8dee JT |
2690 | unsigned long count; |
2691 | unsigned long last_accessed; | |
2692 | struct dm_buffer *b; | |
2693 | ||
2694 | for (count = 0; count < max_count; count++) { | |
2695 | b = cache_evict(&c->cache, list_mode, select_for_evict, params); | |
2696 | if (!b) | |
2697 | break; | |
2698 | ||
2699 | last_accessed = READ_ONCE(b->last_accessed); | |
2700 | if (time_after_eq(params->last_accessed, last_accessed)) | |
2701 | params->last_accessed = last_accessed; | |
2702 | ||
2703 | __make_buffer_clean(b); | |
2704 | __free_buffer_wake(b); | |
2705 | ||
2706 | cond_resched(); | |
2707 | } | |
2708 | ||
2709 | return count; | |
2710 | } | |
2711 | ||
2712 | static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) | |
2713 | { | |
2714 | struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0}; | |
2715 | unsigned long retain = get_retain_buffers(c); | |
13840d38 | 2716 | unsigned long count; |
390020ad | 2717 | LIST_HEAD(write_list); |
33096a78 JT |
2718 | |
2719 | dm_bufio_lock(c); | |
2720 | ||
390020ad MP |
2721 | __check_watermark(c, &write_list); |
2722 | if (unlikely(!list_empty(&write_list))) { | |
2723 | dm_bufio_unlock(c); | |
2724 | __flush_write_list(&write_list); | |
2725 | dm_bufio_lock(c); | |
2726 | } | |
2727 | ||
450e8dee JT |
2728 | count = cache_total(&c->cache); |
2729 | if (count > retain) | |
2730 | __evict_many(c, ¶ms, LIST_CLEAN, count - retain); | |
33096a78 JT |
2731 | |
2732 | dm_bufio_unlock(c); | |
2733 | } | |
2734 | ||
450e8dee | 2735 | static void cleanup_old_buffers(void) |
6e913b28 | 2736 | { |
450e8dee JT |
2737 | unsigned long max_age_hz = get_max_age_hz(); |
2738 | struct dm_bufio_client *c; | |
6e913b28 MP |
2739 | |
2740 | mutex_lock(&dm_bufio_clients_lock); | |
2741 | ||
450e8dee | 2742 | __cache_size_refresh(); |
6e913b28 | 2743 | |
450e8dee JT |
2744 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) |
2745 | evict_old_buffers(c, max_age_hz); | |
6e913b28 | 2746 | |
450e8dee JT |
2747 | mutex_unlock(&dm_bufio_clients_lock); |
2748 | } | |
6e913b28 | 2749 | |
450e8dee JT |
2750 | static void work_fn(struct work_struct *w) |
2751 | { | |
2752 | cleanup_old_buffers(); | |
6e913b28 | 2753 | |
450e8dee JT |
2754 | queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, |
2755 | DM_BUFIO_WORK_TIMER_SECS * HZ); | |
2756 | } | |
6e913b28 | 2757 | |
450e8dee | 2758 | /*--------------------------------------------------------------*/ |
6e913b28 | 2759 | |
450e8dee JT |
2760 | /* |
2761 | * Global cleanup tries to evict the oldest buffers from across _all_ | |
2762 | * the clients. It does this by repeatedly evicting a few buffers from | |
2763 | * the client that holds the oldest buffer. It's approximate, but hopefully | |
2764 | * good enough. | |
2765 | */ | |
2766 | static struct dm_bufio_client *__pop_client(void) | |
2767 | { | |
2768 | struct list_head *h; | |
6e913b28 | 2769 | |
450e8dee JT |
2770 | if (list_empty(&dm_bufio_all_clients)) |
2771 | return NULL; | |
2772 | ||
2773 | h = dm_bufio_all_clients.next; | |
2774 | list_del(h); | |
2775 | return container_of(h, struct dm_bufio_client, client_list); | |
2776 | } | |
2777 | ||
2778 | /* | |
2779 | * Inserts the client in the global client list based on its | |
2780 | * 'oldest_buffer' field. | |
2781 | */ | |
2782 | static void __insert_client(struct dm_bufio_client *new_client) | |
2783 | { | |
2784 | struct dm_bufio_client *c; | |
2785 | struct list_head *h = dm_bufio_all_clients.next; | |
2786 | ||
2787 | while (h != &dm_bufio_all_clients) { | |
2788 | c = container_of(h, struct dm_bufio_client, client_list); | |
2789 | if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) | |
2790 | break; | |
2791 | h = h->next; | |
6e913b28 MP |
2792 | } |
2793 | ||
450e8dee JT |
2794 | list_add_tail(&new_client->client_list, h); |
2795 | } | |
6e913b28 | 2796 | |
450e8dee JT |
2797 | static unsigned long __evict_a_few(unsigned long nr_buffers) |
2798 | { | |
2799 | unsigned long count; | |
2800 | struct dm_bufio_client *c; | |
2801 | struct evict_params params = { | |
2802 | .gfp = GFP_KERNEL, | |
2803 | .age_hz = 0, | |
2804 | /* set to jiffies in case there are no buffers in this client */ | |
2805 | .last_accessed = jiffies | |
2806 | }; | |
6e913b28 | 2807 | |
450e8dee JT |
2808 | c = __pop_client(); |
2809 | if (!c) | |
2810 | return 0; | |
2811 | ||
2812 | dm_bufio_lock(c); | |
2813 | count = __evict_many(c, ¶ms, LIST_CLEAN, nr_buffers); | |
2814 | dm_bufio_unlock(c); | |
2815 | ||
2816 | if (count) | |
2817 | c->oldest_buffer = params.last_accessed; | |
2818 | __insert_client(c); | |
2819 | ||
2820 | return count; | |
6e913b28 MP |
2821 | } |
2822 | ||
450e8dee | 2823 | static void check_watermarks(void) |
33096a78 | 2824 | { |
450e8dee | 2825 | LIST_HEAD(write_list); |
33096a78 JT |
2826 | struct dm_bufio_client *c; |
2827 | ||
2828 | mutex_lock(&dm_bufio_clients_lock); | |
450e8dee JT |
2829 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) { |
2830 | dm_bufio_lock(c); | |
2831 | __check_watermark(c, &write_list); | |
2832 | dm_bufio_unlock(c); | |
2833 | } | |
2834 | mutex_unlock(&dm_bufio_clients_lock); | |
33096a78 | 2835 | |
450e8dee JT |
2836 | __flush_write_list(&write_list); |
2837 | } | |
390020ad | 2838 | |
450e8dee JT |
2839 | static void evict_old(void) |
2840 | { | |
2841 | unsigned long threshold = dm_bufio_cache_size - | |
2842 | dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; | |
33096a78 | 2843 | |
450e8dee JT |
2844 | mutex_lock(&dm_bufio_clients_lock); |
2845 | while (dm_bufio_current_allocated > threshold) { | |
2846 | if (!__evict_a_few(64)) | |
2847 | break; | |
2848 | cond_resched(); | |
2849 | } | |
95d402f0 MP |
2850 | mutex_unlock(&dm_bufio_clients_lock); |
2851 | } | |
2852 | ||
450e8dee | 2853 | static void do_global_cleanup(struct work_struct *w) |
95d402f0 | 2854 | { |
450e8dee JT |
2855 | check_watermarks(); |
2856 | evict_old(); | |
95d402f0 MP |
2857 | } |
2858 | ||
a4a82ce3 HM |
2859 | /* |
2860 | *-------------------------------------------------------------- | |
95d402f0 | 2861 | * Module setup |
a4a82ce3 HM |
2862 | *-------------------------------------------------------------- |
2863 | */ | |
95d402f0 MP |
2864 | |
2865 | /* | |
2866 | * This is called only once for the whole dm_bufio module. | |
2867 | * It initializes the memory limit. | 
2868 | */ | |
2869 | static int __init dm_bufio_init(void) | |
2870 | { | |
2871 | __u64 mem; | |
2872 | ||
4cb57ab4 MP |
2873 | dm_bufio_allocated_kmem_cache = 0; |
2874 | dm_bufio_allocated_get_free_pages = 0; | |
2875 | dm_bufio_allocated_vmalloc = 0; | |
2876 | dm_bufio_current_allocated = 0; | |
2877 | ||
ca79b0c2 | 2878 | mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), |
74d4108d | 2879 | DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; |
95d402f0 MP |
2880 | |
2881 | if (mem > ULONG_MAX) | |
2882 | mem = ULONG_MAX; | |
2883 | ||
2884 | #ifdef CONFIG_MMU | |
74d4108d EB |
2885 | if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) |
2886 | mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); | |
95d402f0 MP |
2887 | #endif |
2888 | ||
2889 | dm_bufio_default_cache_size = mem; | |
2890 | ||
2891 | mutex_lock(&dm_bufio_clients_lock); | |
2892 | __cache_size_refresh(); | |
2893 | mutex_unlock(&dm_bufio_clients_lock); | |
2894 | ||
edd1ea2a | 2895 | dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0); |
95d402f0 MP |
2896 | if (!dm_bufio_wq) |
2897 | return -ENOMEM; | |
2898 | ||
6e913b28 MP |
2899 | INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn); |
2900 | INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup); | |
2901 | queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, | |
95d402f0 MP |
2902 | DM_BUFIO_WORK_TIMER_SECS * HZ); |
2903 | ||
2904 | return 0; | |
2905 | } | |
2906 | ||
2907 | /* | |
2908 | * This is called once when unloading the dm_bufio module. | |
2909 | */ | |
2910 | static void __exit dm_bufio_exit(void) | |
2911 | { | |
2912 | int bug = 0; | |
95d402f0 | 2913 | |
6e913b28 | 2914 | cancel_delayed_work_sync(&dm_bufio_cleanup_old_work); |
95d402f0 MP |
2915 | destroy_workqueue(dm_bufio_wq); |
2916 | ||
95d402f0 MP |
2917 | if (dm_bufio_client_count) { |
2918 | DMCRIT("%s: dm_bufio_client_count leaked: %d", | |
2919 | __func__, dm_bufio_client_count); | |
2920 | bug = 1; | |
2921 | } | |
2922 | ||
2923 | if (dm_bufio_current_allocated) { | |
2924 | DMCRIT("%s: dm_bufio_current_allocated leaked: %lu", | |
2925 | __func__, dm_bufio_current_allocated); | |
2926 | bug = 1; | |
2927 | } | |
2928 | ||
2929 | if (dm_bufio_allocated_get_free_pages) { | |
2930 | DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu", | |
2931 | __func__, dm_bufio_allocated_get_free_pages); | |
2932 | bug = 1; | |
2933 | } | |
2934 | ||
2935 | if (dm_bufio_allocated_vmalloc) { | |
2936 | DMCRIT("%s: dm_bufio_vmalloc leaked: %lu", | |
2937 | __func__, dm_bufio_allocated_vmalloc); | |
2938 | bug = 1; | |
2939 | } | |
2940 | ||
555977dd | 2941 | WARN_ON(bug); /* leaks are not worth crashing the system */ |
95d402f0 MP |
2942 | } |
2943 | ||
2944 | module_init(dm_bufio_init) | |
2945 | module_exit(dm_bufio_exit) | |
2946 | ||
6a808034 | 2947 | module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644); |
95d402f0 MP |
2948 | MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); |
2949 | ||
6a808034 | 2950 | module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644); |
95d402f0 | 2951 | MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); |
33096a78 | 2952 | |
6a808034 | 2953 | module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644); |
33096a78 | 2954 | MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); |
95d402f0 | 2955 | |
6a808034 | 2956 | module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644); |
95d402f0 MP |
2957 | MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory"); |
2958 | ||
6a808034 | 2959 | module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444); |
95d402f0 MP |
2960 | MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc"); |
2961 | ||
6a808034 | 2962 | module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444); |
95d402f0 MP |
2963 | MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages"); |
2964 | ||
6a808034 | 2965 | module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444); |
95d402f0 MP |
2966 | MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc"); |
2967 | ||
6a808034 | 2968 | module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444); |
95d402f0 MP |
2969 | MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache"); |
2970 | ||
2971 | MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>"); | |
2972 | MODULE_DESCRIPTION(DM_NAME " buffered I/O library"); | |
2973 | MODULE_LICENSE("GPL"); |