// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#include "dm.h"

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 * dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

#define SCAN_RESCHED_CYCLE	16

/*--------------------------------------------------------------*/

/*
 * Rather than use an LRU list, we use a clock algorithm where entries
 * are held in a circular list. When an entry is 'hit' a reference bit
 * is set. The least recently used entry is approximated by running a
 * cursor around the list selecting unreferenced entries. Referenced
 * entries have their reference bit cleared as the cursor passes them.
 */
struct lru_entry {
	struct list_head list;
	atomic_t referenced;
};

struct lru_iter {
	struct lru *lru;
	struct list_head list;
	struct lru_entry *stop;
	struct lru_entry *e;
};

struct lru {
	struct list_head *cursor;
	unsigned long count;

	struct list_head iterators;
};

/*--------------*/

static void lru_init(struct lru *lru)
{
	lru->cursor = NULL;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->iterators);
}

static void lru_destroy(struct lru *lru)
{
	WARN_ON_ONCE(lru->cursor);
	WARN_ON_ONCE(!list_empty(&lru->iterators));
}

/*
 * Insert a new entry into the lru.
 */
static void lru_insert(struct lru *lru, struct lru_entry *le)
{
	/*
	 * Don't be tempted to set to 1, makes the lru aspect
	 * perform poorly.
	 */
	atomic_set(&le->referenced, 0);

	if (lru->cursor) {
		list_add_tail(&le->list, lru->cursor);
	} else {
		INIT_LIST_HEAD(&le->list);
		lru->cursor = &le->list;
	}
	lru->count++;
}

/*--------------*/

/*
 * Convert a list_head pointer to an lru_entry pointer.
 */
static inline struct lru_entry *to_le(struct list_head *l)
{
	return container_of(l, struct lru_entry, list);
}

/*
 * Initialize an lru_iter and add it to the list of cursors in the lru.
 */
static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
{
	it->lru = lru;
	it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
	it->e = lru->cursor ? to_le(lru->cursor) : NULL;
	list_add(&it->list, &lru->iterators);
}

/*
 * Remove an lru_iter from the list of cursors in the lru.
 */
static inline void lru_iter_end(struct lru_iter *it)
{
	list_del(&it->list);
}

/* Predicate function type to be used with lru_iter_next */
typedef bool (*iter_predicate)(struct lru_entry *le, void *context);

/*
 * Advance the cursor to the next entry that passes the
 * predicate, and return that entry. Returns NULL if the
 * iteration is complete.
 */
static struct lru_entry *lru_iter_next(struct lru_iter *it,
				       iter_predicate pred, void *context)
{
	struct lru_entry *e;

	while (it->e) {
		e = it->e;

		/* advance the cursor */
		if (it->e == it->stop)
			it->e = NULL;
		else
			it->e = to_le(it->e->list.next);

		if (pred(e, context))
			return e;
	}

	return NULL;
}

/*
 * Invalidate a specific lru_entry and update all cursors in
 * the lru accordingly.
 */
static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
{
	struct lru_iter *it;

	list_for_each_entry(it, &lru->iterators, list) {
		/* Move it->e forwards if necessary. */
		if (it->e == e) {
			it->e = to_le(it->e->list.next);
			if (it->e == e)
				it->e = NULL;
		}

		/* Move it->stop backwards if necessary. */
		if (it->stop == e) {
			it->stop = to_le(it->stop->list.prev);
			if (it->stop == e)
				it->stop = NULL;
		}
	}
}

/*--------------*/

/*
 * Remove a specific entry from the lru.
 */
static void lru_remove(struct lru *lru, struct lru_entry *le)
{
	lru_iter_invalidate(lru, le);
	if (lru->count == 1) {
		lru->cursor = NULL;
	} else {
		if (lru->cursor == &le->list)
			lru->cursor = lru->cursor->next;
		list_del(&le->list);
	}
	lru->count--;
}

/*
 * Mark as referenced.
 */
static inline void lru_reference(struct lru_entry *le)
{
	atomic_set(&le->referenced, 1);
}

/*--------------*/

/*
 * Remove the least recently used entry (approx), that passes the predicate.
 * Returns NULL on failure.
 */
enum evict_result {
	ER_EVICT,
	ER_DONT_EVICT,
	ER_STOP, /* stop looking for something to evict */
};

typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);

static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
	unsigned long tested = 0;
	struct list_head *h = lru->cursor;
	struct lru_entry *le;

	if (!h)
		return NULL;
	/*
	 * In the worst case we have to loop around twice. Once to clear
	 * the reference flags, and then again to discover the predicate
	 * fails for all entries.
	 */
	while (tested < lru->count) {
		le = container_of(h, struct lru_entry, list);

		if (atomic_read(&le->referenced)) {
			atomic_set(&le->referenced, 0);
		} else {
			tested++;
			switch (pred(le, context)) {
			case ER_EVICT:
				/*
				 * Adjust the cursor, so we start the next
				 * search from here.
				 */
				lru->cursor = le->list.next;
				lru_remove(lru, le);
				return le;

			case ER_DONT_EVICT:
				break;

			case ER_STOP:
				lru->cursor = le->list.next;
				return NULL;
			}
		}

		h = h->next;

		if (!no_sleep)
			cond_resched();
	}

	return NULL;
}

/*--------------------------------------------------------------*/

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_KMALLOC = 1,
	DATA_MODE_GET_FREE_PAGES = 2,
	DATA_MODE_VMALLOC = 3,
	DATA_MODE_LIMIT = 4
};

struct dm_buffer {
	/* protected by the locks in dm_buffer_cache */
	struct rb_node node;

	/* immutable, so don't need protecting */
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */

	/*
	 * These two fields are used in isolation, so do not need
	 * a surrounding lock.
	 */
	atomic_t hold_count;
	unsigned long last_accessed;

	/*
	 * Everything else is protected by the mutex in
	 * dm_bufio_client
	 */
	unsigned long state;
	struct lru_entry lru;
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
	struct list_head write_list;
	struct dm_bufio_client *c;
	void (*end_io)(struct dm_buffer *b, blk_status_t bs);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*--------------------------------------------------------------*/

/*
 * The buffer cache manages buffers, particularly:
 *  - inc/dec of holder count
 *  - setting the last_accessed field
 *  - maintains clean/dirty state along with lru
 *  - selecting buffers that match predicates
 *
 * It does *not* handle:
 *  - allocation/freeing of buffers.
 *  - IO
 *  - Eviction or cache sizing.
 *
 * cache_get() and cache_put() are threadsafe, you do not need to
 * protect these calls with a surrounding mutex. All the other
 * methods are not threadsafe; they do use locking primitives, but
 * only enough to ensure get/put are threadsafe.
 */

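/*
 * Each tree is protected either by the rw_semaphore or, for clients created
 * with no_sleep set, by the bh-safe rwlock; which union member is live is
 * decided once per cache in cache_init() below.
 */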
struct buffer_tree {
	union {
		struct rw_semaphore lock;
		rwlock_t spinlock;
	} u;
	struct rb_root root;
} ____cacheline_aligned_in_smp;

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];
	/*
	 * We spread entries across multiple trees to reduce contention
	 * on the locks.
	 */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

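/*
 * Hash a block number to the rb-tree (and lock) that covers it.
 */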
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return dm_hash_locks_index(block, num_locks);
}

static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

/*
 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
 * This struct helps avoid redundant drop and gets of the same lock.
 */
struct lock_history {
	struct dm_buffer_cache *cache;
	bool write;
	unsigned int previous;
	unsigned int no_previous;
};

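/*
 * Typical usage, as in cache_evict() and cache_mark_many() below:
 *
 *	struct lock_history lh;
 *
 *	lh_init(&lh, bc, true);
 *	... call lh_next(&lh, block) before touching each buffer's tree ...
 *	lh_exit(&lh);
 */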
static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
{
	lh->cache = cache;
	lh->write = write;
	lh->no_previous = cache->num_locks;
	lh->previous = lh->no_previous;
}

static void __lh_lock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_lock_bh(&lh->cache->trees[index].u.spinlock);
		else
			down_read(&lh->cache->trees[index].u.lock);
	}
}

static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
	if (lh->write) {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_write(&lh->cache->trees[index].u.lock);
	} else {
		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
		else
			up_read(&lh->cache->trees[index].u.lock);
	}
}

/*
 * Make sure you call this since it will unlock the final lock.
 */
static void lh_exit(struct lock_history *lh)
{
	if (lh->previous != lh->no_previous) {
		__lh_unlock(lh, lh->previous);
		lh->previous = lh->no_previous;
	}
}

/*
 * Named 'next' because there is no corresponding
 * 'up/unlock' call since it's done automatically.
 */
static void lh_next(struct lock_history *lh, sector_t b)
{
	unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */

	if (lh->previous != lh->no_previous) {
		if (lh->previous != index) {
			__lh_unlock(lh, lh->previous);
			__lh_lock(lh, index);
			lh->previous = index;
		}
	} else {
		__lh_lock(lh, index);
		lh->previous = index;
	}
}

static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
{
	return container_of(le, struct dm_buffer, lru);
}

static struct dm_buffer *list_to_buffer(struct list_head *l)
{
	struct lru_entry *le = list_entry(l, struct lru_entry, list);

	return le_to_buffer(le);
}

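/*
 * Initialise the per-client cache: one rb-tree (and lock) per hash bucket
 * plus the clean and dirty lrus.
 */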
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}

static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}

/*--------------*/

/*
 * not threadsafe, or racey depending how you look at it
 */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}

/*--------------*/

/*
 * Gets a specific buffer, indexed by block.
 * If the buffer is found then its holder count will be incremented and
 * lru_reference will be called.
 *
 * threadsafe
 */
static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __cache_inc_buffer(struct dm_buffer *b)
{
	atomic_inc(&b->hold_count);
	WRITE_ONCE(b->last_accessed, jiffies);
}

static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		lru_reference(&b->lru);
		__cache_inc_buffer(b);
	}
	cache_read_unlock(bc, block);

	return b;
}

/*--------------*/

/*
 * Returns true if the hold count hits zero.
 * threadsafe
 */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	BUG_ON(!atomic_read(&b->hold_count));
	r = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(bc, b->block);

	return r;
}

/*--------------*/

typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);

/*
 * Evicts a buffer based on a predicate. The oldest buffer that
 * matches the predicate will be selected. In addition to the
 * predicate the hold_count of the selected buffer will be zero.
 */
struct evict_wrapper {
	struct lock_history *lh;
	b_predicate pred;
	void *context;
};

/*
 * Wraps the buffer predicate turning it into an lru predicate. Adds
 * extra test for hold_count.
 */
static enum evict_result __evict_pred(struct lru_entry *le, void *context)
{
	struct evict_wrapper *w = context;
	struct dm_buffer *b = le_to_buffer(le);

	lh_next(w->lh, b->block);

	if (atomic_read(&b->hold_count))
		return ER_DONT_EVICT;

	return w->pred(b, w->context);
}

static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
	struct lru_entry *le;
	struct dm_buffer *b;

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred will have locked the appropriate tree. */
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct dm_buffer *b;
	struct lock_history lh;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}

/*--------------*/

/*
 * Mark a buffer as clean or dirty. Not threadsafe.
 */
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	if (list_mode != b->list_mode) {
		lru_remove(&bc->lru[b->list_mode], &b->lru);
		b->list_mode = list_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
	cache_write_unlock(bc, b->block);
}

/*--------------*/

/*
 * Runs through the lru associated with 'old_mode', if the predicate matches then
 * it moves them to 'new_mode'. Not threadsafe.
 */
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Iterates through all clean or dirty entries calling a function for each
 * entry. The callback may terminate the iteration early. Not threadsafe.
 */

/*
 * Iterator functions should return one of these actions to indicate
 * how the iteration should proceed.
 */
enum it_action {
	IT_NEXT,
	IT_COMPLETE,
};

typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);

static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);

		switch (fn(b, context)) {
		case IT_NEXT:
			break;

		case IT_COMPLETE:
			return;
		}
		cond_resched();

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}

/*--------------*/

/*
 * Passes ownership of the buffer to the cache. Returns false if the
 * buffer was already present (in which case ownership does not pass).
 * eg, a race with another thread.
 *
 * Holder count should be 1 on insertion.
 *
 * Not threadsafe.
 */
static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block)
			return false;

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, root);

	return true;
}

static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
		return false;

	cache_write_lock(bc, b->block);
	BUG_ON(atomic_read(&b->hold_count) != 1);
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

/*
 * Removes buffer from cache, ownership of the buffer passes back to the caller.
 * Fails if the hold_count is not one (ie. the caller holds the only reference).
 *
 * Not threadsafe.
 */
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}

/*--------------*/

typedef void (*b_release)(struct dm_buffer *);

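/*
 * Find the buffer for @block, or failing that the buffer with the lowest
 * block number above it. Returns NULL if there is no such buffer.
 */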
static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
{
	struct rb_node *n = root->rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	while (true) {
		cond_resched();

		b = __find_next(root, begin);
		if (!b || (b->block >= end))
			break;

		begin = b->block + 1;

		if (atomic_read(&b->hold_count))
			continue;

		if (pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}

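/*
 * Remove all unheld buffers in [begin, end) that the predicate selects,
 * handing each one to @release. Takes every tree's write lock in turn, so
 * it must not be used by no_sleep clients.
 */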
static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}

/*----------------------------------------------------------------*/

/*
 * Linking of buffers:
 * All buffers are linked to buffer_cache with their node field.
 *
 * Clean buffers that are not being written (B_WRITING not set)
 * are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 * Dirty and clean buffers that are being written are linked to
 * lru[LIST_DIRTY] with their lru_list field. When the write
 * finishes, the buffer cannot be relinked immediately (because we
 * are in an interrupt context and relinking requires process
 * context), so some clean-not-writing buffers can be held on
 * dirty_lru too. They are later added to lru in the process
 * context.
 */
struct dm_bufio_client {
	struct block_device *bdev;
	unsigned int block_size;
	s8 sectors_per_block_bits;

	bool no_sleep;
	struct mutex lock;
	spinlock_t spinlock;

	int async_write_error;

	void (*alloc_callback)(struct dm_buffer *buf);
	void (*write_callback)(struct dm_buffer *buf);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned int need_reserved_buffers;

	unsigned int minimum_buffers;

	sector_t start;

	struct shrinker *shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;

	wait_queue_head_t free_buffer_wait;

	struct list_head client_list;

	/*
	 * Used by global_cleanup to sort the clients list.
	 */
	unsigned long oldest_buffer;

	struct dm_buffer_cache cache; /* must be last member */
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

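/*
 * Clients created with no_sleep use a bh-safe spinlock instead of the mutex,
 * so the client lock can be taken in contexts that must not sleep.
 */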
static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static unsigned int dm_bufio_max_age; /* No longer does anything */

static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_kmalloc;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------*/

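/*
 * Track the amount of memory allocated for each data mode and in total;
 * once the total exceeds the configured cache size, queue the global
 * replacement work.
 */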
static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_kmalloc,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	if (!unlink) {
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
		return;
	if (WARN_ON(dm_bufio_client_count < 0))
		return;

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order > MAX_PAGE_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (unlikely(c->block_size < PAGE_SIZE)) {
		*data_mode = DATA_MODE_KMALLOC;
		return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
			c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	return __vmalloc(c->block_size, gfp_mask);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_KMALLOC:
		kfree(data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}
	adjust_total_allocated(b, false);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b, true);
	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset,
		     unsigned short ioprio)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL, ioprio);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset,
		    unsigned short ioprio)
{
	struct bio *bio;
	char *ptr;
	unsigned int len;

	bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
		use_dmio(b, op, sector, n_sectors, offset, ioprio);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;
	bio->bi_ioprio = ioprio;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	bio_add_virt_nofail(bio, ptr, len);

	submit_bio(bio);
}

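/*
 * Convert a block number to its first sector on the device, taking the
 * client's start offset into account.
 */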
static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

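/*
 * Work out which part of the buffer to transfer (writes are trimmed to the
 * dirty range and aligned to DM_BUFIO_WRITE_ALIGN) and submit it through
 * the bio path, falling back to dm-io for vmalloc'd buffers.
 */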
static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned int n_sectors;
	sector_t sector;
	unsigned int offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset, ioprio);
	else
		use_dmio(b, op, sector, n_sectors, offset, ioprio);
}

/*
 *--------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------
 */

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(atomic_read(&b->hold_count));

	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

static enum evict_result is_clean(struct dm_buffer *b, void *context)
{
	struct dm_bufio_client *c = context;

	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
		return ER_DONT_EVICT;

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
	    unlikely(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;

	return ER_EVICT;
}

static enum evict_result is_dirty(struct dm_buffer *b, void *context)
{
	/* These should never happen */
	if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
		return ER_DONT_EVICT;
	if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
		return ER_DONT_EVICT;

	return ER_EVICT;
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
	if (b) {
		/* this also waits for pending reads */
		__make_buffer_clean(b);
		return b;
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
	if (b) {
		__make_buffer_clean(b);
		return b;
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	/*
	 * It's possible to miss a wake up event since we don't always
	 * hold c->lock when wake_up is called. So we have a timeout here,
	 * just in case.
	 */
	io_schedule_timeout(5 * HZ);

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

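/*
 * Why a buffer is being acquired. NF_PREFETCH allocations give up instead
 * of waiting for a free buffer (see __alloc_buffer_wait_no_callback()).
 */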
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_to_buffer(c->reserved_buffers.next);
			list_del(&b->lru.list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

1647 | ||
a66cc28f | 1648 | static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) |
95d402f0 | 1649 | { |
a66cc28f MP |
1650 | struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); |
1651 | ||
1652 | if (!b) | |
1653 | return NULL; | |
95d402f0 MP |
1654 | |
1655 | if (c->alloc_callback) | |
1656 | c->alloc_callback(b); | |
1657 | ||
1658 | return b; | |
1659 | } | |
1660 | ||
1661 | /* | |
1662 | * Free a buffer and wake other threads waiting for free buffers. | |
1663 | */ | |
1664 | static void __free_buffer_wake(struct dm_buffer *b) | |
1665 | { | |
1666 | struct dm_bufio_client *c = b->c; | |
1667 | ||
450e8dee | 1668 | b->block = -1; |
95d402f0 MP |
1669 | if (!c->need_reserved_buffers) |
1670 | free_buffer(b); | |
1671 | else { | |
450e8dee | 1672 | list_add(&b->lru.list, &c->reserved_buffers); |
95d402f0 MP |
1673 | c->need_reserved_buffers--; |
1674 | } | |
1675 | ||
f5f93541 MP |
1676 | /* |
1677 | * We hold the bufio lock here, so no one can add entries to the | |
1678 | * wait queue anyway. | |
1679 | */ | |
1680 | if (unlikely(waitqueue_active(&c->free_buffer_wait))) | |
1681 | wake_up(&c->free_buffer_wait); | |
95d402f0 MP |
1682 | } |
1683 | ||
450e8dee | 1684 | static enum evict_result cleaned(struct dm_buffer *b, void *context) |
95d402f0 | 1685 | { |
450e8dee JT |
1686 | if (WARN_ON_ONCE(test_bit(B_READING, &b->state))) |
1687 | return ER_DONT_EVICT; /* should never happen */ | |
95d402f0 | 1688 | |
450e8dee JT |
1689 | if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state)) |
1690 | return ER_DONT_EVICT; | |
1691 | else | |
1692 | return ER_EVICT; | |
1693 | } | |
95d402f0 | 1694 | |
450e8dee JT |
1695 | static void __move_clean_buffers(struct dm_bufio_client *c) |
1696 | { | |
1697 | cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL); | |
1698 | } | |
95d402f0 | 1699 | |
450e8dee JT |
1700 | struct write_context { |
1701 | int no_wait; | |
1702 | struct list_head *write_list; | |
1703 | }; | |
95d402f0 | 1704 | |
450e8dee JT |
1705 | static enum it_action write_one(struct dm_buffer *b, void *context) |
1706 | { | |
1707 | struct write_context *wc = context; | |
1708 | ||
1709 | if (wc->no_wait && test_bit(B_WRITING, &b->state)) | |
1710 | return IT_COMPLETE; | |
1711 | ||
1712 | __write_dirty_buffer(b, wc->write_list); | |
1713 | return IT_NEXT; | |
1714 | } | |
1715 | ||
1716 | static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, | |
1717 | struct list_head *write_list) | |
1718 | { | |
1719 | struct write_context wc = {.no_wait = no_wait, .write_list = write_list}; | |
1720 | ||
1721 | __move_clean_buffers(c); | |
1722 | cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc); | |
95d402f0 MP |
1723 | } |
1724 | ||
95d402f0 MP |
1725 | /* |
1726 | * Check if we're over the writeback watermark.
1727 | * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
1728 | * number of clean buffers, start writing the dirty buffers back asynchronously.
1729 | */ | |
2480945c MP |
1730 | static void __check_watermark(struct dm_bufio_client *c, |
1731 | struct list_head *write_list) | |
95d402f0 | 1732 | { |
450e8dee JT |
1733 | if (cache_count(&c->cache, LIST_DIRTY) > |
1734 | cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO) | |
2480945c | 1735 | __write_dirty_buffers_async(c, 1, write_list); |
95d402f0 MP |
1736 | } |
1737 | ||
a4a82ce3 HM |
1738 | /* |
1739 | *-------------------------------------------------------------- | |
95d402f0 | 1740 | * Getting a buffer |
a4a82ce3 HM |
1741 | *-------------------------------------------------------------- |
1742 | */ | |
95d402f0 | 1743 | |
450e8dee JT |
1744 | static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b) |
1745 | { | |
1746 | /* | |
1747 | * Relying on waitqueue_active() is racy, but we sleep
1748 | * with schedule_timeout anyway. | |
1749 | */ | |
1750 | if (cache_put(&c->cache, b) && | |
1751 | unlikely(waitqueue_active(&c->free_buffer_wait))) | |
1752 | wake_up(&c->free_buffer_wait); | |
1753 | } | |
1754 | ||
1755 | /* | |
1756 | * This assumes you have already checked the cache to see if the buffer | |
1757 | * is already present (it will recheck after dropping the lock for allocation). | |
1758 | */ | |
95d402f0 | 1759 | static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, |
2480945c MP |
1760 | enum new_flag nf, int *need_submit, |
1761 | struct list_head *write_list) | |
95d402f0 MP |
1762 | { |
1763 | struct dm_buffer *b, *new_b = NULL; | |
1764 | ||
1765 | *need_submit = 0; | |
1766 | ||
450e8dee JT |
1767 | /* This can't be called with NF_GET */ |
1768 | if (WARN_ON_ONCE(nf == NF_GET)) | |
95d402f0 MP |
1769 | return NULL; |
1770 | ||
a66cc28f MP |
1771 | new_b = __alloc_buffer_wait(c, nf); |
1772 | if (!new_b) | |
1773 | return NULL; | |
95d402f0 MP |
1774 | |
1775 | /* | |
1776 | * We've had a period where the mutex was unlocked, so need to | |
ef992373 | 1777 | * recheck the buffer tree. |
95d402f0 | 1778 | */ |
450e8dee | 1779 | b = cache_get(&c->cache, block); |
95d402f0 MP |
1780 | if (b) { |
1781 | __free_buffer_wake(new_b); | |
a66cc28f | 1782 | goto found_buffer; |
95d402f0 MP |
1783 | } |
1784 | ||
2480945c | 1785 | __check_watermark(c, write_list); |
95d402f0 MP |
1786 | |
1787 | b = new_b; | |
450e8dee JT |
1788 | atomic_set(&b->hold_count, 1); |
1789 | WRITE_ONCE(b->last_accessed, jiffies); | |
1790 | b->block = block; | |
95d402f0 MP |
1791 | b->read_error = 0; |
1792 | b->write_error = 0; | |
450e8dee | 1793 | b->list_mode = LIST_CLEAN; |
95d402f0 | 1794 | |
450e8dee | 1795 | if (nf == NF_FRESH) |
95d402f0 | 1796 | b->state = 0; |
450e8dee JT |
1797 | else { |
1798 | b->state = 1 << B_READING; | |
1799 | *need_submit = 1; | |
95d402f0 MP |
1800 | } |
1801 | ||
450e8dee JT |
1802 | /* |
1803 | * We mustn't insert into the cache until the B_READING state | |
1804 | * is set. Otherwise another thread could get it and use | |
1805 | * it before it has been read.
1806 | */ | |
1807 | cache_insert(&c->cache, b); | |
95d402f0 MP |
1808 | |
1809 | return b; | |
a66cc28f MP |
1810 | |
1811 | found_buffer: | |
450e8dee JT |
1812 | if (nf == NF_PREFETCH) { |
1813 | cache_put_and_wake(c, b); | |
a66cc28f | 1814 | return NULL; |
450e8dee JT |
1815 | } |
1816 | ||
a66cc28f MP |
1817 | /* |
1818 | * Note: it is essential that we don't wait for the buffer to be | |
1819 | * read if the dm_bufio_get function is used. Both dm_bufio_get and
1820 | * dm_bufio_prefetch can be used in the driver request routine. | |
1821 | * If the user called both dm_bufio_prefetch and dm_bufio_get on | |
1822 | * the same buffer, it would deadlock if we waited. | |
1823 | */ | |
450e8dee JT |
1824 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { |
1825 | cache_put_and_wake(c, b); | |
a66cc28f | 1826 | return NULL; |
450e8dee | 1827 | } |
a66cc28f | 1828 | |
a66cc28f | 1829 | return b; |
95d402f0 MP |
1830 | } |
1831 | ||
1832 | /* | |
1833 | * The endio routine for reading: set the error, clear the bit and wake up | |
1834 | * anyone waiting on the buffer. | |
1835 | */ | |
45354f1e | 1836 | static void read_endio(struct dm_buffer *b, blk_status_t status) |
95d402f0 | 1837 | { |
45354f1e | 1838 | b->read_error = status; |
95d402f0 MP |
1839 | |
1840 | BUG_ON(!test_bit(B_READING, &b->state)); | |
1841 | ||
4e857c58 | 1842 | smp_mb__before_atomic(); |
95d402f0 | 1843 | clear_bit(B_READING, &b->state); |
4e857c58 | 1844 | smp_mb__after_atomic(); |
95d402f0 MP |
1845 | |
1846 | wake_up_bit(&b->state, B_READING); | |
1847 | } | |
1848 | ||
1849 | /* | |
1850 | * A common routine for dm_bufio_new and dm_bufio_read. Operation of these | |
1851 | * functions is similar except that dm_bufio_new doesn't read the | |
1852 | * buffer from the disk (assuming that the caller overwrites all the data | |
1853 | * and uses dm_bufio_mark_buffer_dirty to write new data back). | |
1854 | */ | |
1855 | static void *new_read(struct dm_bufio_client *c, sector_t block, | |
e9b2238e HJ |
1856 | enum new_flag nf, struct dm_buffer **bp, |
1857 | unsigned short ioprio) | |
95d402f0 | 1858 | { |
450e8dee | 1859 | int need_submit = 0; |
95d402f0 MP |
1860 | struct dm_buffer *b; |
1861 | ||
2480945c MP |
1862 | LIST_HEAD(write_list); |
1863 | ||
450e8dee JT |
1864 | *bp = NULL; |
1865 | ||
1866 | /* | |
1867 | * Fast path, hopefully the block is already in the cache. No need | |
1868 | * to get the client lock for this. | |
1869 | */ | |
1870 | b = cache_get(&c->cache, block); | |
1871 | if (b) { | |
1872 | if (nf == NF_PREFETCH) { | |
1873 | cache_put_and_wake(c, b); | |
1874 | return NULL; | |
1875 | } | |
1876 | ||
1877 | /* | |
1878 | * Note: it is essential that we don't wait for the buffer to be | |
1879 | * read if the dm_bufio_get function is used. Both dm_bufio_get and
1880 | * dm_bufio_prefetch can be used in the driver request routine. | |
1881 | * If the user called both dm_bufio_prefetch and dm_bufio_get on | |
1882 | * the same buffer, it would deadlock if we waited. | |
1883 | */ | |
1884 | if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) { | |
1885 | cache_put_and_wake(c, b); | |
1886 | return NULL; | |
1887 | } | |
1888 | } | |
1889 | ||
1890 | if (!b) { | |
1891 | if (nf == NF_GET) | |
1892 | return NULL; | |
1893 | ||
1894 | dm_bufio_lock(c); | |
1895 | b = __bufio_new(c, block, nf, &need_submit, &write_list); | |
1896 | dm_bufio_unlock(c); | |
1897 | } | |
1898 | ||
86bad0c7 | 1899 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING |
450e8dee | 1900 | if (b && (atomic_read(&b->hold_count) == 1)) |
86bad0c7 MP |
1901 | buffer_record_stack(b); |
1902 | #endif | |
95d402f0 | 1903 | |
2480945c MP |
1904 | __flush_write_list(&write_list); |
1905 | ||
a66cc28f | 1906 | if (!b) |
f98c8f79 | 1907 | return NULL; |
95d402f0 MP |
1908 | |
1909 | if (need_submit) | |
e9b2238e | 1910 | submit_io(b, REQ_OP_READ, ioprio, read_endio); |
95d402f0 | 1911 | |
2a695062 MP |
1912 | if (nf != NF_GET) /* we already tested this condition above */ |
1913 | wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); | |
95d402f0 MP |
1914 | |
1915 | if (b->read_error) { | |
4e4cbee9 | 1916 | int error = blk_status_to_errno(b->read_error); |
95d402f0 MP |
1917 | |
1918 | dm_bufio_release(b); | |
1919 | ||
1920 | return ERR_PTR(error); | |
1921 | } | |
1922 | ||
1923 | *bp = b; | |
1924 | ||
1925 | return b->data; | |
1926 | } | |
1927 | ||
1928 | void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, | |
1929 | struct dm_buffer **bp) | |
1930 | { | |
e9b2238e | 1931 | return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT); |
95d402f0 MP |
1932 | } |
1933 | EXPORT_SYMBOL_GPL(dm_bufio_get); | |
1934 | ||
e9b2238e HJ |
1935 | static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block, |
1936 | struct dm_buffer **bp, unsigned short ioprio) | |
95d402f0 | 1937 | { |
05112287 MS |
1938 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1939 | return ERR_PTR(-EINVAL); | |
95d402f0 | 1940 | |
e9b2238e HJ |
1941 | return new_read(c, block, NF_READ, bp, ioprio); |
1942 | } | |
1943 | ||
1944 | void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, | |
1945 | struct dm_buffer **bp) | |
1946 | { | |
1947 | return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT); | |
95d402f0 MP |
1948 | } |
1949 | EXPORT_SYMBOL_GPL(dm_bufio_read); | |
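/*
 * A minimal usage sketch of the read API above. The helper below
 * (example_read_block) is hypothetical and not part of this file; it
 * assumes the caller already owns a dm_bufio_client.
 */
static int example_read_block(struct dm_bufio_client *c, sector_t blk)
{
	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read(c, blk, &b);	/* may sleep and issue read I/O */
	if (IS_ERR(data))
		return PTR_ERR(data);		/* e.g. a read error on the device */

	/* ... examine dm_bufio_get_block_size(c) bytes at 'data' ... */

	dm_bufio_release(b);			/* drop the hold count taken by the read */
	return 0;
}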
1950 | ||
e9b2238e HJ |
1951 | void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, |
1952 | struct dm_buffer **bp, unsigned short ioprio) | |
1953 | { | |
1954 | return __dm_bufio_read(c, block, bp, ioprio); | |
1955 | } | |
1956 | EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio); | |
1957 | ||
95d402f0 MP |
1958 | void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, |
1959 | struct dm_buffer **bp) | |
1960 | { | |
05112287 MS |
1961 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1962 | return ERR_PTR(-EINVAL); | |
95d402f0 | 1963 | |
e9b2238e | 1964 | return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT); |
95d402f0 MP |
1965 | } |
1966 | EXPORT_SYMBOL_GPL(dm_bufio_new); | |
1967 | ||
e9b2238e HJ |
1968 | static void __dm_bufio_prefetch(struct dm_bufio_client *c, |
1969 | sector_t block, unsigned int n_blocks, | |
1970 | unsigned short ioprio) | |
a66cc28f MP |
1971 | { |
1972 | struct blk_plug plug; | |
1973 | ||
2480945c MP |
1974 | LIST_HEAD(write_list); |
1975 | ||
05112287 MS |
1976 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
1977 | return; /* should never happen */ | |
3b6b7813 | 1978 | |
a66cc28f | 1979 | blk_start_plug(&plug); |
a66cc28f MP |
1980 | |
1981 | for (; n_blocks--; block++) { | |
1982 | int need_submit; | |
1983 | struct dm_buffer *b; | |
0ef0b471 | 1984 | |
450e8dee JT |
1985 | b = cache_get(&c->cache, block); |
1986 | if (b) { | |
1987 | /* already in cache */ | |
1988 | cache_put_and_wake(c, b); | |
1989 | continue; | |
1990 | } | |
1991 | ||
1992 | dm_bufio_lock(c); | |
2480945c MP |
1993 | b = __bufio_new(c, block, NF_PREFETCH, &need_submit, |
1994 | &write_list); | |
1995 | if (unlikely(!list_empty(&write_list))) { | |
1996 | dm_bufio_unlock(c); | |
1997 | blk_finish_plug(&plug); | |
1998 | __flush_write_list(&write_list); | |
1999 | blk_start_plug(&plug); | |
2000 | dm_bufio_lock(c); | |
2001 | } | |
a66cc28f MP |
2002 | if (unlikely(b != NULL)) { |
2003 | dm_bufio_unlock(c); | |
2004 | ||
2005 | if (need_submit) | |
e9b2238e | 2006 | submit_io(b, REQ_OP_READ, ioprio, read_endio); |
a66cc28f MP |
2007 | dm_bufio_release(b); |
2008 | ||
7cd32674 | 2009 | cond_resched(); |
a66cc28f MP |
2010 | |
2011 | if (!n_blocks) | |
2012 | goto flush_plug; | |
2013 | dm_bufio_lock(c); | |
2014 | } | |
450e8dee | 2015 | dm_bufio_unlock(c); |
a66cc28f MP |
2016 | } |
2017 | ||
a66cc28f MP |
2018 | flush_plug: |
2019 | blk_finish_plug(&plug); | |
2020 | } | |
e9b2238e HJ |
2021 | |
2022 | void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) | |
2023 | { | |
2024 | return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT); | |
2025 | } | |
a66cc28f MP |
2026 | EXPORT_SYMBOL_GPL(dm_bufio_prefetch); |
2027 | ||
e9b2238e HJ |
2028 | void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, |
2029 | unsigned int n_blocks, unsigned short ioprio) | |
2030 | { | |
2031 | return __dm_bufio_prefetch(c, block, n_blocks, ioprio); | |
2032 | } | |
2033 | EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio); | |
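/*
 * A hedged sketch of the intended prefetch pattern; the helper below is
 * illustrative only and its name is invented for this example. Prefetch
 * warms the cache without returning buffers, so each block is still
 * obtained (and released) individually when it is actually used.
 */
static void example_prefetch_then_read(struct dm_bufio_client *c,
				       sector_t first, unsigned int n)
{
	sector_t blk;

	dm_bufio_prefetch(c, first, n);		/* asynchronous, no buffers held */

	for (blk = first; blk < first + n; blk++) {
		struct dm_buffer *b;
		void *data = dm_bufio_read(c, blk, &b);

		if (IS_ERR(data))
			continue;		/* skip blocks whose read failed */
		/* ... use the cached data ... */
		dm_bufio_release(b);
	}
}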
2034 | ||
95d402f0 MP |
2035 | void dm_bufio_release(struct dm_buffer *b) |
2036 | { | |
2037 | struct dm_bufio_client *c = b->c; | |
2038 | ||
450e8dee JT |
2039 | /* |
2040 | * If there were errors on the buffer, and the buffer is not | |
2041 | * to be written, free the buffer. There is no point in caching | |
2042 | * an invalid buffer.
2043 | */ | |
2044 | if ((b->read_error || b->write_error) && | |
2045 | !test_bit_acquire(B_READING, &b->state) && | |
2046 | !test_bit(B_WRITING, &b->state) && | |
2047 | !test_bit(B_DIRTY, &b->state)) { | |
2048 | dm_bufio_lock(c); | |
95d402f0 | 2049 | |
450e8dee JT |
2050 | /* cache remove can fail if there are other holders */ |
2051 | if (cache_remove(&c->cache, b)) { | |
95d402f0 | 2052 | __free_buffer_wake(b); |
450e8dee JT |
2053 | dm_bufio_unlock(c); |
2054 | return; | |
95d402f0 | 2055 | } |
450e8dee JT |
2056 | |
2057 | dm_bufio_unlock(c); | |
95d402f0 MP |
2058 | } |
2059 | ||
450e8dee | 2060 | cache_put_and_wake(c, b); |
95d402f0 MP |
2061 | } |
2062 | EXPORT_SYMBOL_GPL(dm_bufio_release); | |
2063 | ||
1e3b21c6 | 2064 | void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, |
86a3238c | 2065 | unsigned int start, unsigned int end) |
95d402f0 MP |
2066 | { |
2067 | struct dm_bufio_client *c = b->c; | |
2068 | ||
1e3b21c6 MP |
2069 | BUG_ON(start >= end); |
2070 | BUG_ON(end > b->c->block_size); | |
2071 | ||
95d402f0 MP |
2072 | dm_bufio_lock(c); |
2073 | ||
a66cc28f MP |
2074 | BUG_ON(test_bit(B_READING, &b->state)); |
2075 | ||
1e3b21c6 MP |
2076 | if (!test_and_set_bit(B_DIRTY, &b->state)) { |
2077 | b->dirty_start = start; | |
2078 | b->dirty_end = end; | |
450e8dee | 2079 | cache_mark(&c->cache, b, LIST_DIRTY); |
1e3b21c6 MP |
2080 | } else { |
2081 | if (start < b->dirty_start) | |
2082 | b->dirty_start = start; | |
2083 | if (end > b->dirty_end) | |
2084 | b->dirty_end = end; | |
2085 | } | |
95d402f0 MP |
2086 | |
2087 | dm_bufio_unlock(c); | |
2088 | } | |
1e3b21c6 MP |
2089 | EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); |
2090 | ||
2091 | void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) | |
2092 | { | |
2093 | dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); | |
2094 | } | |
95d402f0 MP |
2095 | EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); |
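/*
 * A hedged sketch of the write path. example_overwrite_block is
 * hypothetical; it assumes the whole block is overwritten, which is the
 * precondition for using dm_bufio_new() instead of dm_bufio_read().
 */
static int example_overwrite_block(struct dm_bufio_client *c, sector_t blk,
				   const void *src)
{
	struct dm_buffer *b;
	void *data = dm_bufio_new(c, blk, &b);	/* no read: caller overwrites all data */

	if (IS_ERR(data))
		return PTR_ERR(data);

	memcpy(data, src, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(b);		/* queue the buffer on the dirty list */
	dm_bufio_release(b);

	/* Write back all dirty buffers and flush the disk cache. */
	return dm_bufio_write_dirty_buffers(c);
}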
2096 | ||
2097 | void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) | |
2098 | { | |
2480945c MP |
2099 | LIST_HEAD(write_list); |
2100 | ||
05112287 MS |
2101 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2102 | return; /* should never happen */ | |
95d402f0 MP |
2103 | |
2104 | dm_bufio_lock(c); | |
2480945c | 2105 | __write_dirty_buffers_async(c, 0, &write_list); |
95d402f0 | 2106 | dm_bufio_unlock(c); |
2480945c | 2107 | __flush_write_list(&write_list); |
95d402f0 MP |
2108 | } |
2109 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); | |
2110 | ||
2111 | /* | |
2112 | * For performance, it is essential that the buffers are written asynchronously | |
2113 | * and simultaneously (so that the block layer can merge the writes) and then | |
2114 | * waited upon. | |
2115 | * | |
2116 | * Finally, we flush hardware disk cache. | |
2117 | */ | |
450e8dee JT |
2118 | static bool is_writing(struct lru_entry *e, void *context) |
2119 | { | |
2120 | struct dm_buffer *b = le_to_buffer(e); | |
2121 | ||
2122 | return test_bit(B_WRITING, &b->state); | |
2123 | } | |
2124 | ||
95d402f0 MP |
2125 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) |
2126 | { | |
edc11d49 | 2127 | int a, f; |
450e8dee JT |
2128 | unsigned long nr_buffers; |
2129 | struct lru_entry *e; | |
2130 | struct lru_iter it; | |
95d402f0 | 2131 | |
2480945c MP |
2132 | LIST_HEAD(write_list); |
2133 | ||
2134 | dm_bufio_lock(c); | |
2135 | __write_dirty_buffers_async(c, 0, &write_list); | |
2136 | dm_bufio_unlock(c); | |
2137 | __flush_write_list(&write_list); | |
95d402f0 | 2138 | dm_bufio_lock(c); |
95d402f0 | 2139 | |
450e8dee JT |
2140 | nr_buffers = cache_count(&c->cache, LIST_DIRTY); |
2141 | lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it); | |
2142 | while ((e = lru_iter_next(&it, is_writing, c))) { | |
2143 | struct dm_buffer *b = le_to_buffer(e); | |
2144 | __cache_inc_buffer(b); | |
95d402f0 MP |
2145 | |
2146 | BUG_ON(test_bit(B_READING, &b->state)); | |
2147 | ||
450e8dee JT |
2148 | if (nr_buffers) { |
2149 | nr_buffers--; | |
2150 | dm_bufio_unlock(c); | |
2151 | wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); | |
2152 | dm_bufio_lock(c); | |
2153 | } else { | |
2154 | wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); | |
95d402f0 MP |
2155 | } |
2156 | ||
450e8dee JT |
2157 | if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state)) |
2158 | cache_mark(&c->cache, b, LIST_CLEAN); | |
95d402f0 | 2159 | |
450e8dee | 2160 | cache_put_and_wake(c, b); |
95d402f0 | 2161 | |
450e8dee | 2162 | cond_resched(); |
95d402f0 | 2163 | } |
450e8dee JT |
2164 | lru_iter_end(&it); |
2165 | ||
95d402f0 MP |
2166 | wake_up(&c->free_buffer_wait); |
2167 | dm_bufio_unlock(c); | |
2168 | ||
2169 | a = xchg(&c->async_write_error, 0); | |
2170 | f = dm_bufio_issue_flush(c); | |
2171 | if (a) | |
2172 | return a; | |
2173 | ||
2174 | return f; | |
2175 | } | |
2176 | EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); | |
2177 | ||
2178 | /* | |
ef992373 | 2179 | * Use dm-io to send an empty barrier to flush the device. |
95d402f0 MP |
2180 | */ |
2181 | int dm_bufio_issue_flush(struct dm_bufio_client *c) | |
2182 | { | |
2183 | struct dm_io_request io_req = { | |
581075e4 | 2184 | .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC, |
95d402f0 MP |
2185 | .mem.type = DM_IO_KMEM, |
2186 | .mem.ptr.addr = NULL, | |
2187 | .client = c->dm_io, | |
2188 | }; | |
2189 | struct dm_io_region io_reg = { | |
2190 | .bdev = c->bdev, | |
2191 | .sector = 0, | |
2192 | .count = 0, | |
2193 | }; | |
2194 | ||
05112287 MS |
2195 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2196 | return -EINVAL; | |
95d402f0 | 2197 | |
6e5f0f63 | 2198 | return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT); |
95d402f0 MP |
2199 | } |
2200 | EXPORT_SYMBOL_GPL(dm_bufio_issue_flush); | |
2201 | ||
6fbeb004 MP |
2202 | /* |
2203 | * Use dm-io to send a discard request to the device.
2204 | */ | |
2205 | int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) | |
2206 | { | |
2207 | struct dm_io_request io_req = { | |
581075e4 | 2208 | .bi_opf = REQ_OP_DISCARD | REQ_SYNC, |
6fbeb004 MP |
2209 | .mem.type = DM_IO_KMEM, |
2210 | .mem.ptr.addr = NULL, | |
2211 | .client = c->dm_io, | |
2212 | }; | |
2213 | struct dm_io_region io_reg = { | |
2214 | .bdev = c->bdev, | |
2215 | .sector = block_to_sector(c, block), | |
2216 | .count = block_to_sector(c, count), | |
2217 | }; | |
2218 | ||
05112287 MS |
2219 | if (WARN_ON_ONCE(dm_bufio_in_request())) |
2220 | return -EINVAL; /* discards are optional */ | |
6fbeb004 | 2221 | |
6e5f0f63 | 2222 | return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT); |
6fbeb004 MP |
2223 | } |
2224 | EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); | |
2225 | ||
3be1f253 | 2226 | static void forget_buffer(struct dm_bufio_client *c, sector_t block) |
33a18062 | 2227 | { |
450e8dee JT |
2228 | struct dm_buffer *b; |
2229 | ||
2230 | b = cache_get(&c->cache, block); | |
2231 | if (b) { | |
2232 | if (likely(!smp_load_acquire(&b->state))) { | |
2233 | if (cache_remove(&c->cache, b)) | |
2234 | __free_buffer_wake(b); | |
2235 | else | |
2236 | cache_put_and_wake(c, b); | |
2237 | } else { | |
2238 | cache_put_and_wake(c, b); | |
2239 | } | |
33a18062 MP |
2240 | } |
2241 | } | |
2242 | ||
55494bf2 MP |
2243 | /* |
2244 | * Free the given buffer. | |
2245 | * | |
2246 | * This is just a hint; if the buffer is in use or dirty, this function
2247 | * does nothing. | |
2248 | */ | |
2249 | void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) | |
2250 | { | |
55494bf2 | 2251 | dm_bufio_lock(c); |
450e8dee | 2252 | forget_buffer(c, block); |
55494bf2 MP |
2253 | dm_bufio_unlock(c); |
2254 | } | |
afa53df8 | 2255 | EXPORT_SYMBOL_GPL(dm_bufio_forget); |
55494bf2 | 2256 | |
450e8dee | 2257 | static enum evict_result idle(struct dm_buffer *b, void *context) |
33a18062 | 2258 | { |
450e8dee JT |
2259 | return b->state ? ER_DONT_EVICT : ER_EVICT; |
2260 | } | |
33a18062 | 2261 | |
450e8dee JT |
2262 | void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) |
2263 | { | |
2264 | dm_bufio_lock(c); | |
2265 | cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake); | |
2266 | dm_bufio_unlock(c); | |
33a18062 MP |
2267 | } |
2268 | EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers); | |
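/*
 * A hedged sketch combining discard with invalidation; the helper is
 * illustrative only. Dropping the cached copies after a discard is usually
 * desirable because they no longer reflect the device contents; forgetting
 * is only a hint and leaves held or dirty buffers alone.
 */
static int example_discard_range(struct dm_bufio_client *c,
				 sector_t block, sector_t n_blocks)
{
	int r = dm_bufio_issue_discard(c, block, n_blocks);

	dm_bufio_forget_buffers(c, block, n_blocks);
	return r;
}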
2269 | ||
86a3238c | 2270 | void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n) |
55b082e6 MP |
2271 | { |
2272 | c->minimum_buffers = n; | |
2273 | } | |
afa53df8 | 2274 | EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers); |
55b082e6 | 2275 | |
86a3238c | 2276 | unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c) |
95d402f0 MP |
2277 | { |
2278 | return c->block_size; | |
2279 | } | |
2280 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_size); | |
2281 | ||
2282 | sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) | |
2283 | { | |
6dcbb52c | 2284 | sector_t s = bdev_nr_sectors(c->bdev); |
0ef0b471 | 2285 | |
a14e5ec6 MP |
2286 | if (s >= c->start) |
2287 | s -= c->start; | |
2288 | else | |
2289 | s = 0; | |
f51f2e0a MP |
2290 | if (likely(c->sectors_per_block_bits >= 0)) |
2291 | s >>= c->sectors_per_block_bits; | |
2292 | else | |
2293 | sector_div(s, c->block_size >> SECTOR_SHIFT); | |
2294 | return s; | |
95d402f0 MP |
2295 | } |
2296 | EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); | |
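/*
 * A worked example of the computation above (the values are illustrative):
 * a 1 GiB device (2097152 sectors of 512 bytes) with c->start == 2048 and
 * 4096-byte blocks (sectors_per_block_bits == 3) yields
 * (2097152 - 2048) >> 3 == 261888 usable blocks.
 */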
2297 | ||
9b594826 MP |
2298 | struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) |
2299 | { | |
2300 | return c->dm_io; | |
2301 | } | |
2302 | EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); | |
2303 | ||
95d402f0 MP |
2304 | sector_t dm_bufio_get_block_number(struct dm_buffer *b) |
2305 | { | |
2306 | return b->block; | |
2307 | } | |
2308 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_number); | |
2309 | ||
2310 | void *dm_bufio_get_block_data(struct dm_buffer *b) | |
2311 | { | |
2312 | return b->data; | |
2313 | } | |
2314 | EXPORT_SYMBOL_GPL(dm_bufio_get_block_data); | |
2315 | ||
2316 | void *dm_bufio_get_aux_data(struct dm_buffer *b) | |
2317 | { | |
2318 | return b + 1; | |
2319 | } | |
2320 | EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data); | |
2321 | ||
2322 | struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b) | |
2323 | { | |
2324 | return b->c; | |
2325 | } | |
2326 | EXPORT_SYMBOL_GPL(dm_bufio_get_client); | |
2327 | ||
450e8dee JT |
2328 | static enum it_action warn_leak(struct dm_buffer *b, void *context) |
2329 | { | |
2330 | bool *warned = context; | |
2331 | ||
2332 | WARN_ON(!(*warned)); | |
2333 | *warned = true; | |
2334 | DMERR("leaked buffer %llx, hold count %u, list %d", | |
2335 | (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode); | |
2336 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING | |
2337 | stack_trace_print(b->stack_entries, b->stack_len, 1); | |
2338 | /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */ | |
2339 | atomic_set(&b->hold_count, 0); | |
2340 | #endif | |
2341 | return IT_NEXT; | |
2342 | } | |
2343 | ||
95d402f0 MP |
2344 | static void drop_buffers(struct dm_bufio_client *c) |
2345 | { | |
95d402f0 | 2346 | int i; |
450e8dee | 2347 | struct dm_buffer *b; |
95d402f0 | 2348 | |
b75a80f4 MS |
2349 | if (WARN_ON(dm_bufio_in_request())) |
2350 | return; /* should never happen */ | |
95d402f0 MP |
2351 | |
2352 | /* | |
2353 | * An optimization so that the buffers are not written one-by-one. | |
2354 | */ | |
2355 | dm_bufio_write_dirty_buffers_async(c); | |
2356 | ||
2357 | dm_bufio_lock(c); | |
2358 | ||
2359 | while ((b = __get_unclaimed_buffer(c))) | |
2360 | __free_buffer_wake(b); | |
2361 | ||
450e8dee JT |
2362 | for (i = 0; i < LIST_SIZE; i++) { |
2363 | bool warned = false; | |
2364 | ||
2365 | cache_iterate(&c->cache, i, warn_leak, &warned); | |
2366 | } | |
86bad0c7 MP |
2367 | |
2368 | #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING | |
2369 | while ((b = __get_unclaimed_buffer(c))) | |
2370 | __free_buffer_wake(b); | |
2371 | #endif | |
95d402f0 MP |
2372 | |
2373 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee | 2374 | WARN_ON(cache_count(&c->cache, i)); |
95d402f0 MP |
2375 | |
2376 | dm_bufio_unlock(c); | |
2377 | } | |
2378 | ||
13840d38 | 2379 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
33096a78 | 2380 | { |
f51f2e0a | 2381 | unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); |
0ef0b471 | 2382 | |
f51f2e0a MP |
2383 | if (likely(c->sectors_per_block_bits >= 0)) |
2384 | retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; | |
2385 | else | |
2386 | retain_bytes /= c->block_size; | |
0ef0b471 | 2387 | |
f51f2e0a | 2388 | return retain_bytes; |
33096a78 JT |
2389 | } |
2390 | ||
70704c33 | 2391 | static void __scan(struct dm_bufio_client *c) |
95d402f0 MP |
2392 | { |
2393 | int l; | |
450e8dee | 2394 | struct dm_buffer *b; |
33096a78 | 2395 | unsigned long freed = 0; |
13840d38 | 2396 | unsigned long retain_target = get_retain_buffers(c); |
450e8dee | 2397 | unsigned long count = cache_total(&c->cache); |
95d402f0 MP |
2398 | |
2399 | for (l = 0; l < LIST_SIZE; l++) { | |
450e8dee | 2400 | while (true) { |
70704c33 MP |
2401 | if (count - freed <= retain_target) |
2402 | atomic_long_set(&c->need_shrink, 0); | |
2403 | if (!atomic_long_read(&c->need_shrink)) | |
450e8dee JT |
2404 | break; |
2405 | ||
2406 | b = cache_evict(&c->cache, l, | |
2407 | l == LIST_CLEAN ? is_clean : is_dirty, c); | |
2408 | if (!b) | |
2409 | break; | |
2410 | ||
2411 | __make_buffer_clean(b); | |
2412 | __free_buffer_wake(b); | |
2413 | ||
2414 | atomic_long_dec(&c->need_shrink); | |
2415 | freed++; | |
a3d8f0a7 LW |
2416 | |
2417 | if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) { | |
2418 | dm_bufio_unlock(c); | |
2419 | cond_resched(); | |
2420 | dm_bufio_lock(c); | |
2421 | } | |
7dc19d5a | 2422 | } |
95d402f0 MP |
2423 | } |
2424 | } | |
2425 | ||
70704c33 MP |
2426 | static void shrink_work(struct work_struct *w) |
2427 | { | |
2428 | struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work); | |
2429 | ||
2430 | dm_bufio_lock(c); | |
2431 | __scan(c); | |
2432 | dm_bufio_unlock(c); | |
2433 | } | |
2434 | ||
2435 | static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |
95d402f0 | 2436 | { |
7dc19d5a | 2437 | struct dm_bufio_client *c; |
95d402f0 | 2438 | |
1f1d459c | 2439 | c = shrink->private_data; |
70704c33 MP |
2440 | atomic_long_add(sc->nr_to_scan, &c->need_shrink); |
2441 | queue_work(dm_bufio_wq, &c->shrink_work); | |
95d402f0 | 2442 | |
70704c33 | 2443 | return sc->nr_to_scan; |
7dc19d5a | 2444 | } |
95d402f0 | 2445 | |
70704c33 | 2446 | static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
7dc19d5a | 2447 | { |
1f1d459c | 2448 | struct dm_bufio_client *c = shrink->private_data; |
450e8dee | 2449 | unsigned long count = cache_total(&c->cache); |
fbc7c07e | 2450 | unsigned long retain_target = get_retain_buffers(c); |
70704c33 MP |
2451 | unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink); |
2452 | ||
2453 | if (unlikely(count < retain_target)) | |
2454 | count = 0; | |
2455 | else | |
2456 | count -= retain_target; | |
95d402f0 | 2457 | |
70704c33 MP |
2458 | if (unlikely(count < queued_for_cleanup)) |
2459 | count = 0; | |
2460 | else | |
2461 | count -= queued_for_cleanup; | |
2462 | ||
2463 | return count; | |
95d402f0 MP |
2464 | } |
2465 | ||
2466 | /* | |
2467 | * Create the buffering interface | |
2468 | */ | |
86a3238c HM |
2469 | struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size, |
2470 | unsigned int reserved_buffers, unsigned int aux_size, | |
95d402f0 | 2471 | void (*alloc_callback)(struct dm_buffer *), |
0fcb100d NH |
2472 | void (*write_callback)(struct dm_buffer *), |
2473 | unsigned int flags) | |
95d402f0 MP |
2474 | { |
2475 | int r; | |
1e84c4b7 | 2476 | unsigned int num_locks; |
95d402f0 | 2477 | struct dm_bufio_client *c; |
42964e4b MP |
2478 | char slab_name[64]; |
2479 | static atomic_t seqno = ATOMIC_INIT(0); | |
95d402f0 | 2480 | |
f51f2e0a MP |
2481 | if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { |
2482 | DMERR("%s: block size not specified or is not multiple of 512b", __func__); | |
2483 | r = -EINVAL; | |
2484 | goto bad_client; | |
2485 | } | |
95d402f0 | 2486 | |
1e84c4b7 MS |
2487 | num_locks = dm_num_hash_locks(); |
2488 | c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL); | |
95d402f0 MP |
2489 | if (!c) { |
2490 | r = -ENOMEM; | |
2491 | goto bad_client; | |
2492 | } | |
2a695062 | 2493 | cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0); |
95d402f0 MP |
2494 | |
2495 | c->bdev = bdev; | |
2496 | c->block_size = block_size; | |
f51f2e0a MP |
2497 | if (is_power_of_2(block_size)) |
2498 | c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; | |
2499 | else | |
2500 | c->sectors_per_block_bits = -1; | |
95d402f0 | 2501 | |
95d402f0 MP |
2502 | c->alloc_callback = alloc_callback; |
2503 | c->write_callback = write_callback; | |
2504 | ||
3c1c875d | 2505 | if (flags & DM_BUFIO_CLIENT_NO_SLEEP) { |
b32d4582 | 2506 | c->no_sleep = true; |
3c1c875d MS |
2507 | static_branch_inc(&no_sleep_enabled); |
2508 | } | |
b32d4582 | 2509 | |
95d402f0 | 2510 | mutex_init(&c->lock); |
b32d4582 | 2511 | spin_lock_init(&c->spinlock); |
95d402f0 MP |
2512 | INIT_LIST_HEAD(&c->reserved_buffers); |
2513 | c->need_reserved_buffers = reserved_buffers; | |
2514 | ||
afa53df8 | 2515 | dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); |
55b082e6 | 2516 | |
95d402f0 MP |
2517 | init_waitqueue_head(&c->free_buffer_wait); |
2518 | c->async_write_error = 0; | |
2519 | ||
2520 | c->dm_io = dm_io_client_create(); | |
2521 | if (IS_ERR(c->dm_io)) { | |
2522 | r = PTR_ERR(c->dm_io); | |
2523 | goto bad_dm_io; | |
2524 | } | |
2525 | ||
61a57254 | 2526 | if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) { |
86a3238c | 2527 | unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE); |
0ef0b471 | 2528 | |
42964e4b MP |
2529 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u", |
2530 | block_size, atomic_inc_return(&seqno)); | |
f7879b4c | 2531 | c->slab_cache = kmem_cache_create(slab_name, block_size, align, |
6b5e718c | 2532 | SLAB_RECLAIM_ACCOUNT, NULL); |
21bb1327 MP |
2533 | if (!c->slab_cache) { |
2534 | r = -ENOMEM; | |
2535 | goto bad; | |
95d402f0 MP |
2536 | } |
2537 | } | |
359dbf19 | 2538 | if (aux_size) |
42964e4b MP |
2539 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u", |
2540 | aux_size, atomic_inc_return(&seqno)); | |
359dbf19 | 2541 | else |
42964e4b MP |
2542 | snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", |
2543 | atomic_inc_return(&seqno)); | |
359dbf19 MP |
2544 | c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, |
2545 | 0, SLAB_RECLAIM_ACCOUNT, NULL); | |
2546 | if (!c->slab_buffer) { | |
2547 | r = -ENOMEM; | |
2548 | goto bad; | |
2549 | } | |
95d402f0 MP |
2550 | |
2551 | while (c->need_reserved_buffers) { | |
2552 | struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); | |
2553 | ||
2554 | if (!b) { | |
2555 | r = -ENOMEM; | |
0e696d38 | 2556 | goto bad; |
95d402f0 MP |
2557 | } |
2558 | __free_buffer_wake(b); | |
2559 | } | |
2560 | ||
70704c33 MP |
2561 | INIT_WORK(&c->shrink_work, shrink_work); |
2562 | atomic_long_set(&c->need_shrink, 0); | |
2563 | ||
1f1d459c QZ |
2564 | c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)", |
2565 | MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); | |
2566 | if (!c->shrinker) { | |
2567 | r = -ENOMEM; | |
0e696d38 | 2568 | goto bad; |
1f1d459c QZ |
2569 | } |
2570 | ||
2571 | c->shrinker->count_objects = dm_bufio_shrink_count; | |
2572 | c->shrinker->scan_objects = dm_bufio_shrink_scan; | |
2573 | c->shrinker->seeks = 1; | |
2574 | c->shrinker->batch = 0; | |
2575 | c->shrinker->private_data = c; | |
2576 | ||
2577 | shrinker_register(c->shrinker); | |
46898e9a | 2578 | |
95d402f0 MP |
2579 | mutex_lock(&dm_bufio_clients_lock); |
2580 | dm_bufio_client_count++; | |
2581 | list_add(&c->client_list, &dm_bufio_all_clients); | |
2582 | __cache_size_refresh(); | |
2583 | mutex_unlock(&dm_bufio_clients_lock); | |
2584 | ||
95d402f0 MP |
2585 | return c; |
2586 | ||
0e696d38 | 2587 | bad: |
95d402f0 | 2588 | while (!list_empty(&c->reserved_buffers)) { |
450e8dee JT |
2589 | struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); |
2590 | ||
2591 | list_del(&b->lru.list); | |
95d402f0 MP |
2592 | free_buffer(b); |
2593 | } | |
21bb1327 | 2594 | kmem_cache_destroy(c->slab_cache); |
359dbf19 | 2595 | kmem_cache_destroy(c->slab_buffer); |
95d402f0 MP |
2596 | dm_io_client_destroy(c->dm_io); |
2597 | bad_dm_io: | |
bde14184 | 2598 | mutex_destroy(&c->lock); |
0dfc1f4c ZC |
2599 | if (c->no_sleep) |
2600 | static_branch_dec(&no_sleep_enabled); | |
95d402f0 MP |
2601 | kfree(c); |
2602 | bad_client: | |
2603 | return ERR_PTR(r); | |
2604 | } | |
2605 | EXPORT_SYMBOL_GPL(dm_bufio_client_create); | |
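/*
 * A hedged lifecycle sketch for the constructor above. The helper names and
 * the 4096-byte block size are illustrative, not recommendations.
 */
static struct dm_bufio_client *example_open_client(struct block_device *bdev)
{
	/* 4 KiB blocks, 1 reserved buffer, no aux data, no callbacks, no flags. */
	struct dm_bufio_client *c = dm_bufio_client_create(bdev, 4096, 1, 0,
							   NULL, NULL, 0);

	return IS_ERR(c) ? NULL : c;	/* callers may prefer to propagate PTR_ERR(c) */
}

static void example_close_client(struct dm_bufio_client *c)
{
	/* Every buffer must have been released before destroying the client. */
	dm_bufio_client_destroy(c);
}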
2606 | ||
2607 | /* | |
2608 | * Free the buffering interface. | |
2609 | * It is required that there are no references on any buffers. | |
2610 | */ | |
2611 | void dm_bufio_client_destroy(struct dm_bufio_client *c) | |
2612 | { | |
86a3238c | 2613 | unsigned int i; |
95d402f0 MP |
2614 | |
2615 | drop_buffers(c); | |
2616 | ||
1f1d459c | 2617 | shrinker_free(c->shrinker); |
70704c33 | 2618 | flush_work(&c->shrink_work); |
95d402f0 MP |
2619 | |
2620 | mutex_lock(&dm_bufio_clients_lock); | |
2621 | ||
2622 | list_del(&c->client_list); | |
2623 | dm_bufio_client_count--; | |
2624 | __cache_size_refresh(); | |
2625 | ||
2626 | mutex_unlock(&dm_bufio_clients_lock); | |
2627 | ||
555977dd | 2628 | WARN_ON(c->need_reserved_buffers); |
95d402f0 MP |
2629 | |
2630 | while (!list_empty(&c->reserved_buffers)) { | |
450e8dee JT |
2631 | struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next); |
2632 | ||
2633 | list_del(&b->lru.list); | |
95d402f0 MP |
2634 | free_buffer(b); |
2635 | } | |
2636 | ||
2637 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee JT |
2638 | if (cache_count(&c->cache, i)) |
2639 | DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i)); | |
95d402f0 MP |
2640 | |
2641 | for (i = 0; i < LIST_SIZE; i++) | |
450e8dee | 2642 | WARN_ON(cache_count(&c->cache, i)); |
95d402f0 | 2643 | |
450e8dee | 2644 | cache_destroy(&c->cache); |
21bb1327 | 2645 | kmem_cache_destroy(c->slab_cache); |
359dbf19 | 2646 | kmem_cache_destroy(c->slab_buffer); |
95d402f0 | 2647 | dm_io_client_destroy(c->dm_io); |
bde14184 | 2648 | mutex_destroy(&c->lock); |
3c1c875d MS |
2649 | if (c->no_sleep) |
2650 | static_branch_dec(&no_sleep_enabled); | |
95d402f0 MP |
2651 | kfree(c); |
2652 | } | |
2653 | EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); | |
2654 | ||
d4830012 LL |
2655 | void dm_bufio_client_reset(struct dm_bufio_client *c) |
2656 | { | |
2657 | drop_buffers(c); | |
2658 | flush_work(&c->shrink_work); | |
2659 | } | |
2660 | EXPORT_SYMBOL_GPL(dm_bufio_client_reset); | |
2661 | ||
400a0bef MP |
2662 | void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) |
2663 | { | |
2664 | c->start = start; | |
2665 | } | |
2666 | EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); | |
2667 | ||
450e8dee JT |
2668 | /*--------------------------------------------------------------*/ |
2669 | ||
450e8dee JT |
2670 | /* |
2671 | * Global cleanup tries to evict the oldest buffers from across _all_ | |
2672 | * the clients. It does this by repeatedly evicting a few buffers from | |
2673 | * the client that holds the oldest buffer. It's approximate, but hopefully | |
2674 | * good enough. | |
2675 | */ | |
2676 | static struct dm_bufio_client *__pop_client(void) | |
2677 | { | |
2678 | struct list_head *h; | |
6e913b28 | 2679 | |
450e8dee JT |
2680 | if (list_empty(&dm_bufio_all_clients)) |
2681 | return NULL; | |
2682 | ||
2683 | h = dm_bufio_all_clients.next; | |
2684 | list_del(h); | |
2685 | return container_of(h, struct dm_bufio_client, client_list); | |
2686 | } | |
2687 | ||
2688 | /* | |
2689 | * Inserts the client in the global client list based on its | |
2690 | * 'oldest_buffer' field. | |
2691 | */ | |
2692 | static void __insert_client(struct dm_bufio_client *new_client) | |
2693 | { | |
2694 | struct dm_bufio_client *c; | |
2695 | struct list_head *h = dm_bufio_all_clients.next; | |
2696 | ||
2697 | while (h != &dm_bufio_all_clients) { | |
2698 | c = container_of(h, struct dm_bufio_client, client_list); | |
2699 | if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer)) | |
2700 | break; | |
2701 | h = h->next; | |
6e913b28 MP |
2702 | } |
2703 | ||
450e8dee JT |
2704 | list_add_tail(&new_client->client_list, h); |
2705 | } | |
6e913b28 | 2706 | |
97693781 EB |
2707 | static enum evict_result select_for_evict(struct dm_buffer *b, void *context) |
2708 | { | |
2709 | /* In no-sleep mode, we cannot wait on IO. */ | |
2710 | if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) { | |
2711 | if (test_bit_acquire(B_READING, &b->state) || | |
2712 | test_bit(B_WRITING, &b->state) || | |
2713 | test_bit(B_DIRTY, &b->state)) | |
2714 | return ER_DONT_EVICT; | |
2715 | } | |
2716 | return ER_EVICT; | |
2717 | } | |
2718 | ||
450e8dee JT |
2719 | static unsigned long __evict_a_few(unsigned long nr_buffers) |
2720 | { | |
450e8dee | 2721 | struct dm_bufio_client *c; |
97693781 EB |
2722 | unsigned long oldest_buffer = jiffies; |
2723 | unsigned long last_accessed; | |
2724 | unsigned long count; | |
2725 | struct dm_buffer *b; | |
6e913b28 | 2726 | |
450e8dee JT |
2727 | c = __pop_client(); |
2728 | if (!c) | |
2729 | return 0; | |
2730 | ||
2731 | dm_bufio_lock(c); | |
97693781 EB |
2732 | |
2733 | for (count = 0; count < nr_buffers; count++) { | |
2734 | b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL); | |
2735 | if (!b) | |
2736 | break; | |
2737 | ||
2738 | last_accessed = READ_ONCE(b->last_accessed); | |
2739 | if (time_after_eq(oldest_buffer, last_accessed)) | |
2740 | oldest_buffer = last_accessed; | |
2741 | ||
2742 | __make_buffer_clean(b); | |
2743 | __free_buffer_wake(b); | |
2744 | ||
2745 | cond_resched(); | |
2746 | } | |
2747 | ||
450e8dee JT |
2748 | dm_bufio_unlock(c); |
2749 | ||
2750 | if (count) | |
97693781 | 2751 | c->oldest_buffer = oldest_buffer; |
450e8dee JT |
2752 | __insert_client(c); |
2753 | ||
2754 | return count; | |
6e913b28 MP |
2755 | } |
2756 | ||
450e8dee | 2757 | static void check_watermarks(void) |
33096a78 | 2758 | { |
450e8dee | 2759 | LIST_HEAD(write_list); |
33096a78 JT |
2760 | struct dm_bufio_client *c; |
2761 | ||
2762 | mutex_lock(&dm_bufio_clients_lock); | |
450e8dee JT |
2763 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) { |
2764 | dm_bufio_lock(c); | |
2765 | __check_watermark(c, &write_list); | |
2766 | dm_bufio_unlock(c); | |
2767 | } | |
2768 | mutex_unlock(&dm_bufio_clients_lock); | |
33096a78 | 2769 | |
450e8dee JT |
2770 | __flush_write_list(&write_list); |
2771 | } | |
390020ad | 2772 | |
450e8dee JT |
2773 | static void evict_old(void) |
2774 | { | |
2775 | unsigned long threshold = dm_bufio_cache_size - | |
2776 | dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO; | |
33096a78 | 2777 | |
450e8dee JT |
2778 | mutex_lock(&dm_bufio_clients_lock); |
2779 | while (dm_bufio_current_allocated > threshold) { | |
2780 | if (!__evict_a_few(64)) | |
2781 | break; | |
2782 | cond_resched(); | |
2783 | } | |
95d402f0 MP |
2784 | mutex_unlock(&dm_bufio_clients_lock); |
2785 | } | |
2786 | ||
450e8dee | 2787 | static void do_global_cleanup(struct work_struct *w) |
95d402f0 | 2788 | { |
450e8dee JT |
2789 | check_watermarks(); |
2790 | evict_old(); | |
95d402f0 MP |
2791 | } |
2792 | ||
a4a82ce3 HM |
2793 | /* |
2794 | *-------------------------------------------------------------- | |
95d402f0 | 2795 | * Module setup |
a4a82ce3 HM |
2796 | *-------------------------------------------------------------- |
2797 | */ | |
95d402f0 MP |
2798 | |
2799 | /* | |
2800 | * This is called only once for the whole dm_bufio module. | |
2801 | * It initializes the memory limit.
2802 | */ | |
2803 | static int __init dm_bufio_init(void) | |
2804 | { | |
2805 | __u64 mem; | |
2806 | ||
4cb57ab4 | 2807 | dm_bufio_allocated_kmem_cache = 0; |
61a57254 | 2808 | dm_bufio_allocated_kmalloc = 0; |
4cb57ab4 MP |
2809 | dm_bufio_allocated_get_free_pages = 0; |
2810 | dm_bufio_allocated_vmalloc = 0; | |
2811 | dm_bufio_current_allocated = 0; | |
2812 | ||
ca79b0c2 | 2813 | mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), |
74d4108d | 2814 | DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; |
95d402f0 MP |
2815 | |
2816 | if (mem > ULONG_MAX) | |
2817 | mem = ULONG_MAX; | |
2818 | ||
2819 | #ifdef CONFIG_MMU | |
74d4108d EB |
2820 | if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) |
2821 | mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); | |
95d402f0 MP |
2822 | #endif |
2823 | ||
2824 | dm_bufio_default_cache_size = mem; | |
2825 | ||
2826 | mutex_lock(&dm_bufio_clients_lock); | |
2827 | __cache_size_refresh(); | |
2828 | mutex_unlock(&dm_bufio_clients_lock); | |
2829 | ||
edd1ea2a | 2830 | dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0); |
95d402f0 MP |
2831 | if (!dm_bufio_wq) |
2832 | return -ENOMEM; | |
2833 | ||
6e913b28 | 2834 | INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup); |
95d402f0 MP |
2835 | |
2836 | return 0; | |
2837 | } | |
2838 | ||
2839 | /* | |
2840 | * This is called once when unloading the dm_bufio module. | |
2841 | */ | |
2842 | static void __exit dm_bufio_exit(void) | |
2843 | { | |
2844 | int bug = 0; | |
95d402f0 | 2845 | |
95d402f0 MP |
2846 | destroy_workqueue(dm_bufio_wq); |
2847 | ||
95d402f0 MP |
2848 | if (dm_bufio_client_count) { |
2849 | DMCRIT("%s: dm_bufio_client_count leaked: %d", | |
2850 | __func__, dm_bufio_client_count); | |
2851 | bug = 1; | |
2852 | } | |
2853 | ||
2854 | if (dm_bufio_current_allocated) { | |
2855 | DMCRIT("%s: dm_bufio_current_allocated leaked: %lu", | |
2856 | __func__, dm_bufio_current_allocated); | |
2857 | bug = 1; | |
2858 | } | |
2859 | ||
2860 | if (dm_bufio_allocated_get_free_pages) { | |
2861 | DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu", | |
2862 | __func__, dm_bufio_allocated_get_free_pages); | |
2863 | bug = 1; | |
2864 | } | |
2865 | ||
2866 | if (dm_bufio_allocated_vmalloc) { | |
2867 | DMCRIT("%s: dm_bufio_vmalloc leaked: %lu", | |
2868 | __func__, dm_bufio_allocated_vmalloc); | |
2869 | bug = 1; | |
2870 | } | |
2871 | ||
555977dd | 2872 | WARN_ON(bug); /* leaks are not worth crashing the system */ |
95d402f0 MP |
2873 | } |
2874 | ||
2875 | module_init(dm_bufio_init) | |
2876 | module_exit(dm_bufio_exit) | |
2877 | ||
6a808034 | 2878 | module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644); |
95d402f0 MP |
2879 | MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); |
2880 | ||
6a808034 | 2881 | module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644); |
97693781 | 2882 | MODULE_PARM_DESC(max_age_seconds, "No longer does anything"); |
33096a78 | 2883 | |
6a808034 | 2884 | module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644); |
33096a78 | 2885 | MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); |
95d402f0 | 2886 | |
6a808034 | 2887 | module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644); |
95d402f0 MP |
2888 | MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory"); |
2889 | ||
6a808034 | 2890 | module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444); |
95d402f0 MP |
2891 | MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc"); |
2892 | ||
61a57254 MP |
2893 | module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444); |
2894 | MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc");
2895 | ||
6a808034 | 2896 | module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444); |
95d402f0 MP |
2897 | MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages"); |
2898 | ||
6a808034 | 2899 | module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444); |
95d402f0 MP |
2900 | MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc"); |
2901 | ||
6a808034 | 2902 | module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444); |
95d402f0 MP |
2903 | MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache"); |
2904 | ||
fa34e589 | 2905 | MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>"); |
95d402f0 MP |
2906 | MODULE_DESCRIPTION(DM_NAME " buffered I/O library"); |
2907 | MODULE_LICENSE("GPL"); |