Commit | Line | Data |
---|---|---|
3bd94003 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
66a63635 JT |
2 | /* |
3 | * Copyright (C) 2015 Red Hat. All rights reserved. | |
4 | * | |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
b29d4986 | 8 | #include "dm-cache-background-tracker.h" |
66a63635 | 9 | #include "dm-cache-policy-internal.h" |
b29d4986 | 10 | #include "dm-cache-policy.h" |
66a63635 JT |
11 | #include "dm.h" |
12 | ||
13 | #include <linux/hash.h> | |
14 | #include <linux/jiffies.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/mutex.h> | |
17 | #include <linux/vmalloc.h> | |
18 | #include <linux/math64.h> | |
19 | ||
20 | #define DM_MSG_PREFIX "cache-policy-smq" | |
21 | ||
22 | /*----------------------------------------------------------------*/ | |
23 | ||
24 | /* | |
25 | * Safe division functions that return zero on divide by zero. | |
26 | */ | |
86a3238c | 27 | static unsigned int safe_div(unsigned int n, unsigned int d) |
66a63635 JT |
28 | { |
29 | return d ? n / d : 0u; | |
30 | } | |
31 | ||
86a3238c | 32 | static unsigned int safe_mod(unsigned int n, unsigned int d) |
66a63635 JT |
33 | { |
34 | return d ? n % d : 0u; | |
35 | } | |
36 | ||
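/*
 * Worked example added in editing (not part of the original file):
 * safe_div(7, 2) == 3 and safe_div(7, 0) == 0.  The zero-denominator
 * case matters because callers such as stats_assess() (hits + misses can
 * be zero just after a reset) and q_set_targets_subrange_() (the level
 * range can be empty) divide by values that may legitimately be 0.
 */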
37 | /*----------------------------------------------------------------*/ | |
38 | ||
39 | struct entry { | |
86a3238c HM |
40 | unsigned int hash_next:28; |
41 | unsigned int prev:28; | |
42 | unsigned int next:28; | |
43 | unsigned int level:6; | |
66a63635 JT |
44 | bool dirty:1; |
45 | bool allocated:1; | |
46 | bool sentinel:1; | |
b29d4986 | 47 | bool pending_work:1; |
66a63635 JT |
48 | |
49 | dm_oblock_t oblock; | |
50 | }; | |
51 | ||
52 | /*----------------------------------------------------------------*/ | |
53 | ||
54 | #define INDEXER_NULL ((1u << 28u) - 1u) | |
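/*
 * Editorial note (not part of the original file): the hash_next/prev/next
 * fields above are 28-bit indices into an entry_space rather than
 * pointers, so an entry array can address at most (1 << 28) - 1 =
 * 268435455 entries; the all-ones value is reserved as INDEXER_NULL, the
 * index equivalent of a NULL pointer.
 */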
55 | ||
56 | /* | |
57 | * An entry_space manages a set of entries that we use for the queues. | |
58 | * The clean and dirty queues share entries, so this object is separate | |
59 | * from the queue itself. | |
60 | */ | |
61 | struct entry_space { | |
62 | struct entry *begin; | |
63 | struct entry *end; | |
64 | }; | |
65 | ||
86a3238c | 66 | static int space_init(struct entry_space *es, unsigned int nr_entries) |
66a63635 JT |
67 | { |
68 | if (!nr_entries) { | |
69 | es->begin = es->end = NULL; | |
70 | return 0; | |
71 | } | |
72 | ||
fad953ce | 73 | es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry))); |
66a63635 JT |
74 | if (!es->begin) |
75 | return -ENOMEM; | |
76 | ||
77 | es->end = es->begin + nr_entries; | |
78 | return 0; | |
79 | } | |
80 | ||
81 | static void space_exit(struct entry_space *es) | |
82 | { | |
83 | vfree(es->begin); | |
84 | } | |
85 | ||
86a3238c | 86 | static struct entry *__get_entry(struct entry_space *es, unsigned int block) |
66a63635 JT |
87 | { |
88 | struct entry *e; | |
89 | ||
90 | e = es->begin + block; | |
91 | BUG_ON(e >= es->end); | |
92 | ||
93 | return e; | |
94 | } | |
95 | ||
86a3238c | 96 | static unsigned int to_index(struct entry_space *es, struct entry *e) |
66a63635 JT |
97 | { |
98 | BUG_ON(e < es->begin || e >= es->end); | |
99 | return e - es->begin; | |
100 | } | |
101 | ||
86a3238c | 102 | static struct entry *to_entry(struct entry_space *es, unsigned int block) |
66a63635 JT |
103 | { |
104 | if (block == INDEXER_NULL) | |
105 | return NULL; | |
106 | ||
107 | return __get_entry(es, block); | |
108 | } | |
109 | ||
110 | /*----------------------------------------------------------------*/ | |
111 | ||
112 | struct ilist { | |
86a3238c HM |
113 | unsigned int nr_elts; /* excluding sentinel entries */ |
114 | unsigned int head, tail; | |
66a63635 JT |
115 | }; |
116 | ||
117 | static void l_init(struct ilist *l) | |
118 | { | |
119 | l->nr_elts = 0; | |
120 | l->head = l->tail = INDEXER_NULL; | |
121 | } | |
122 | ||
123 | static struct entry *l_head(struct entry_space *es, struct ilist *l) | |
124 | { | |
125 | return to_entry(es, l->head); | |
126 | } | |
127 | ||
128 | static struct entry *l_tail(struct entry_space *es, struct ilist *l) | |
129 | { | |
130 | return to_entry(es, l->tail); | |
131 | } | |
132 | ||
133 | static struct entry *l_next(struct entry_space *es, struct entry *e) | |
134 | { | |
135 | return to_entry(es, e->next); | |
136 | } | |
137 | ||
138 | static struct entry *l_prev(struct entry_space *es, struct entry *e) | |
139 | { | |
140 | return to_entry(es, e->prev); | |
141 | } | |
142 | ||
143 | static bool l_empty(struct ilist *l) | |
144 | { | |
145 | return l->head == INDEXER_NULL; | |
146 | } | |
147 | ||
148 | static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) | |
149 | { | |
150 | struct entry *head = l_head(es, l); | |
151 | ||
152 | e->next = l->head; | |
153 | e->prev = INDEXER_NULL; | |
154 | ||
155 | if (head) | |
156 | head->prev = l->head = to_index(es, e); | |
157 | else | |
158 | l->head = l->tail = to_index(es, e); | |
159 | ||
160 | if (!e->sentinel) | |
161 | l->nr_elts++; | |
162 | } | |
163 | ||
164 | static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) | |
165 | { | |
166 | struct entry *tail = l_tail(es, l); | |
167 | ||
168 | e->next = INDEXER_NULL; | |
169 | e->prev = l->tail; | |
170 | ||
171 | if (tail) | |
172 | tail->next = l->tail = to_index(es, e); | |
173 | else | |
174 | l->head = l->tail = to_index(es, e); | |
175 | ||
176 | if (!e->sentinel) | |
177 | l->nr_elts++; | |
178 | } | |
179 | ||
180 | static void l_add_before(struct entry_space *es, struct ilist *l, | |
181 | struct entry *old, struct entry *e) | |
182 | { | |
183 | struct entry *prev = l_prev(es, old); | |
184 | ||
185 | if (!prev) | |
186 | l_add_head(es, l, e); | |
187 | ||
188 | else { | |
189 | e->prev = old->prev; | |
190 | e->next = to_index(es, old); | |
191 | prev->next = old->prev = to_index(es, e); | |
192 | ||
193 | if (!e->sentinel) | |
194 | l->nr_elts++; | |
195 | } | |
196 | } | |
197 | ||
198 | static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) | |
199 | { | |
200 | struct entry *prev = l_prev(es, e); | |
201 | struct entry *next = l_next(es, e); | |
202 | ||
203 | if (prev) | |
204 | prev->next = e->next; | |
205 | else | |
206 | l->head = e->next; | |
207 | ||
208 | if (next) | |
209 | next->prev = e->prev; | |
210 | else | |
211 | l->tail = e->prev; | |
212 | ||
213 | if (!e->sentinel) | |
214 | l->nr_elts--; | |
215 | } | |
216 | ||
9768a10d JT |
217 | static struct entry *l_pop_head(struct entry_space *es, struct ilist *l) |
218 | { | |
219 | struct entry *e; | |
220 | ||
221 | for (e = l_head(es, l); e; e = l_next(es, e)) | |
222 | if (!e->sentinel) { | |
223 | l_del(es, l, e); | |
224 | return e; | |
225 | } | |
226 | ||
227 | return NULL; | |
228 | } | |
229 | ||
66a63635 JT |
230 | static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l) |
231 | { | |
232 | struct entry *e; | |
233 | ||
234 | for (e = l_tail(es, l); e; e = l_prev(es, e)) | |
235 | if (!e->sentinel) { | |
236 | l_del(es, l, e); | |
237 | return e; | |
238 | } | |
239 | ||
240 | return NULL; | |
241 | } | |
242 | ||
243 | /*----------------------------------------------------------------*/ | |
244 | ||
245 | /* | |
246 | * The stochastic-multi-queue is a set of LRU lists stacked into levels. | 
247 | * Entries are moved up levels when they are used, which loosely orders the | 
248 | * most-accessed entries into the top levels and the least-accessed into the bottom. | 
249 | * This structure is *much* better than a single LRU list. | 
250 | */ | |
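/*
 * Illustrative note added in editing (not part of the original file):
 * with MAX_LEVELS = 64, level 0 holds the coldest entries and level 63
 * the hottest.  A hit lifts an entry by a few levels (see q_requeue()
 * below), and q_redistribute() periodically rebalances the per-level
 * counts towards target_count[], so the shape of the queue stays roughly
 * fixed while entries migrate through it.
 */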
251 | #define MAX_LEVELS 64u | |
252 | ||
253 | struct queue { | |
254 | struct entry_space *es; | |
255 | ||
86a3238c HM |
256 | unsigned int nr_elts; |
257 | unsigned int nr_levels; | |
66a63635 JT |
258 | struct ilist qs[MAX_LEVELS]; |
259 | ||
260 | /* | |
261 | * We maintain a count of the number of entries we would like in each | |
262 | * level. | |
263 | */ | |
86a3238c HM |
264 | unsigned int last_target_nr_elts; |
265 | unsigned int nr_top_levels; | |
266 | unsigned int nr_in_top_levels; | |
267 | unsigned int target_count[MAX_LEVELS]; | |
66a63635 JT |
268 | }; |
269 | ||
86a3238c | 270 | static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels) |
66a63635 | 271 | { |
86a3238c | 272 | unsigned int i; |
66a63635 JT |
273 | |
274 | q->es = es; | |
275 | q->nr_elts = 0; | |
276 | q->nr_levels = nr_levels; | |
277 | ||
278 | for (i = 0; i < q->nr_levels; i++) { | |
279 | l_init(q->qs + i); | |
280 | q->target_count[i] = 0u; | |
281 | } | |
282 | ||
283 | q->last_target_nr_elts = 0u; | |
284 | q->nr_top_levels = 0u; | |
285 | q->nr_in_top_levels = 0u; | |
286 | } | |
287 | ||
86a3238c | 288 | static unsigned int q_size(struct queue *q) |
66a63635 JT |
289 | { |
290 | return q->nr_elts; | |
291 | } | |
292 | ||
293 | /* | |
294 | * Insert an entry at the back of the given level. | 
295 | */ | |
296 | static void q_push(struct queue *q, struct entry *e) | |
297 | { | |
b29d4986 JT |
298 | BUG_ON(e->pending_work); |
299 | ||
66a63635 JT |
300 | if (!e->sentinel) |
301 | q->nr_elts++; | |
302 | ||
303 | l_add_tail(q->es, q->qs + e->level, e); | |
304 | } | |
305 | ||
b29d4986 JT |
306 | static void q_push_front(struct queue *q, struct entry *e) |
307 | { | |
308 | BUG_ON(e->pending_work); | |
309 | ||
310 | if (!e->sentinel) | |
311 | q->nr_elts++; | |
312 | ||
313 | l_add_head(q->es, q->qs + e->level, e); | |
314 | } | |
315 | ||
66a63635 JT |
316 | static void q_push_before(struct queue *q, struct entry *old, struct entry *e) |
317 | { | |
b29d4986 JT |
318 | BUG_ON(e->pending_work); |
319 | ||
66a63635 JT |
320 | if (!e->sentinel) |
321 | q->nr_elts++; | |
322 | ||
323 | l_add_before(q->es, q->qs + e->level, old, e); | |
324 | } | |
325 | ||
326 | static void q_del(struct queue *q, struct entry *e) | |
327 | { | |
328 | l_del(q->es, q->qs + e->level, e); | |
329 | if (!e->sentinel) | |
330 | q->nr_elts--; | |
331 | } | |
332 | ||
333 | /* | |
334 | * Return the oldest entry of the lowest populated level. | |
335 | */ | |
86a3238c | 336 | static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel) |
66a63635 | 337 | { |
86a3238c | 338 | unsigned int level; |
66a63635 JT |
339 | struct entry *e; |
340 | ||
341 | max_level = min(max_level, q->nr_levels); | |
342 | ||
343 | for (level = 0; level < max_level; level++) | |
344 | for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { | |
345 | if (e->sentinel) { | |
346 | if (can_cross_sentinel) | |
347 | continue; | |
348 | else | |
349 | break; | |
350 | } | |
351 | ||
352 | return e; | |
353 | } | |
354 | ||
355 | return NULL; | |
356 | } | |
357 | ||
358 | static struct entry *q_pop(struct queue *q) | |
359 | { | |
360 | struct entry *e = q_peek(q, q->nr_levels, true); | |
361 | ||
362 | if (e) | |
363 | q_del(q, e); | |
364 | ||
365 | return e; | |
366 | } | |
367 | ||
66a63635 JT |
368 | /* |
369 | * This function assumes there is a non-sentinel entry to pop. It's only | |
370 | * used by redistribute, so we know this is true. It also doesn't adjust | |
371 | * the q->nr_elts count. | |
372 | */ | |
86a3238c | 373 | static struct entry *__redist_pop_from(struct queue *q, unsigned int level) |
66a63635 JT |
374 | { |
375 | struct entry *e; | |
376 | ||
377 | for (; level < q->nr_levels; level++) | |
378 | for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) | |
379 | if (!e->sentinel) { | |
380 | l_del(q->es, q->qs + e->level, e); | |
381 | return e; | |
382 | } | |
383 | ||
384 | return NULL; | |
385 | } | |
386 | ||
86a3238c HM |
387 | static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts, |
388 | unsigned int lbegin, unsigned int lend) | |
66a63635 | 389 | { |
86a3238c | 390 | unsigned int level, nr_levels, entries_per_level, remainder; |
66a63635 JT |
391 | |
392 | BUG_ON(lbegin > lend); | |
393 | BUG_ON(lend > q->nr_levels); | |
394 | nr_levels = lend - lbegin; | |
395 | entries_per_level = safe_div(nr_elts, nr_levels); | |
396 | remainder = safe_mod(nr_elts, nr_levels); | |
397 | ||
398 | for (level = lbegin; level < lend; level++) | |
399 | q->target_count[level] = | |
400 | (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level; | |
401 | } | |
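/*
 * Worked example added in editing (not part of the original file): asked
 * to spread nr_elts = 10 over the four levels [lbegin, lbegin + 4), the
 * code above computes entries_per_level = 2 and remainder = 2, giving
 * targets of 3, 3, 2, 2.  The remainder is handed out one entry at a time
 * to the lowest levels in the range.
 */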
402 | ||
403 | /* | |
404 | * Typically we have fewer elements in the top few levels, which allows us | 
405 | * to adjust the promote threshold nicely. | |
406 | */ | |
407 | static void q_set_targets(struct queue *q) | |
408 | { | |
409 | if (q->last_target_nr_elts == q->nr_elts) | |
410 | return; | |
411 | ||
412 | q->last_target_nr_elts = q->nr_elts; | |
413 | ||
414 | if (q->nr_top_levels > q->nr_levels) | |
415 | q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels); | |
416 | ||
417 | else { | |
418 | q_set_targets_subrange_(q, q->nr_in_top_levels, | |
419 | q->nr_levels - q->nr_top_levels, q->nr_levels); | |
420 | ||
421 | if (q->nr_in_top_levels < q->nr_elts) | |
422 | q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels, | |
423 | 0, q->nr_levels - q->nr_top_levels); | |
424 | else | |
425 | q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels); | |
426 | } | |
427 | } | |
428 | ||
429 | static void q_redistribute(struct queue *q) | |
430 | { | |
86a3238c | 431 | unsigned int target, level; |
66a63635 JT |
432 | struct ilist *l, *l_above; |
433 | struct entry *e; | |
434 | ||
435 | q_set_targets(q); | |
436 | ||
437 | for (level = 0u; level < q->nr_levels - 1u; level++) { | |
438 | l = q->qs + level; | |
439 | target = q->target_count[level]; | |
440 | ||
441 | /* | |
442 | * Pull down some entries from the level above. | |
443 | */ | |
444 | while (l->nr_elts < target) { | |
445 | e = __redist_pop_from(q, level + 1u); | |
446 | if (!e) { | |
447 | /* bug in nr_elts */ | |
448 | break; | |
449 | } | |
450 | ||
451 | e->level = level; | |
452 | l_add_tail(q->es, l, e); | |
453 | } | |
454 | ||
455 | /* | |
456 | * Push some entries up. | |
457 | */ | |
458 | l_above = q->qs + level + 1u; | |
459 | while (l->nr_elts > target) { | |
460 | e = l_pop_tail(q->es, l); | |
461 | ||
462 | if (!e) | |
463 | /* bug in nr_elts */ | |
464 | break; | |
465 | ||
466 | e->level = level + 1u; | |
b29d4986 | 467 | l_add_tail(q->es, l_above, e); |
66a63635 JT |
468 | } |
469 | } | |
470 | } | |
471 | ||
86a3238c | 472 | static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels, |
b29d4986 | 473 | struct entry *s1, struct entry *s2) |
66a63635 JT |
474 | { |
475 | struct entry *de; | |
86a3238c HM |
476 | unsigned int sentinels_passed = 0; |
477 | unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels); | |
66a63635 | 478 | |
b29d4986 | 479 | /* try to find an entry to swap with */ |
66a63635 | 480 | if (extra_levels && (e->level < q->nr_levels - 1u)) { |
b29d4986 JT |
481 | for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de)) |
482 | sentinels_passed++; | |
66a63635 | 483 | |
b29d4986 | 484 | if (de) { |
66a63635 JT |
485 | q_del(q, de); |
486 | de->level = e->level; | |
b29d4986 JT |
487 | if (s1) { |
488 | switch (sentinels_passed) { | |
489 | case 0: | |
490 | q_push_before(q, s1, de); | |
491 | break; | |
492 | ||
493 | case 1: | |
494 | q_push_before(q, s2, de); | |
495 | break; | |
66a63635 | 496 | |
b29d4986 JT |
497 | default: |
498 | q_push(q, de); | |
499 | } | |
500 | } else | |
66a63635 | 501 | q_push(q, de); |
66a63635 | 502 | } |
66a63635 JT |
503 | } |
504 | ||
b29d4986 JT |
505 | q_del(q, e); |
506 | e->level = new_level; | |
66a63635 JT |
507 | q_push(q, e); |
508 | } | |
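/*
 * Editorial note (not part of the original file): q_requeue() above lifts
 * entry e from its current level to new_level.  To keep the per-level
 * counts stable it tries to pull one entry (de) down from the destination
 * level into the vacated level; the optional s1/s2 sentinels (passed for
 * the dirty queue by requeue()) let the displaced entry be re-inserted
 * before the appropriate sentinel, depending on how many sentinels were
 * skipped while searching the destination level.
 */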
509 | ||
66a63635 JT |
510 | /*----------------------------------------------------------------*/ |
511 | ||
512 | #define FP_SHIFT 8 | |
513 | #define SIXTEENTH (1u << (FP_SHIFT - 4u)) | |
514 | #define EIGHTH (1u << (FP_SHIFT - 3u)) | |
515 | ||
516 | struct stats { | |
86a3238c HM |
517 | unsigned int hit_threshold; |
518 | unsigned int hits; | |
519 | unsigned int misses; | |
66a63635 JT |
520 | }; |
521 | ||
522 | enum performance { | |
523 | Q_POOR, | |
524 | Q_FAIR, | |
525 | Q_WELL | |
526 | }; | |
527 | ||
86a3238c | 528 | static void stats_init(struct stats *s, unsigned int nr_levels) |
66a63635 JT |
529 | { |
530 | s->hit_threshold = (nr_levels * 3u) / 4u; | |
531 | s->hits = 0u; | |
532 | s->misses = 0u; | |
533 | } | |
534 | ||
535 | static void stats_reset(struct stats *s) | |
536 | { | |
537 | s->hits = s->misses = 0u; | |
538 | } | |
539 | ||
86a3238c | 540 | static void stats_level_accessed(struct stats *s, unsigned int level) |
66a63635 JT |
541 | { |
542 | if (level >= s->hit_threshold) | |
543 | s->hits++; | |
544 | else | |
545 | s->misses++; | |
546 | } | |
547 | ||
548 | static void stats_miss(struct stats *s) | |
549 | { | |
550 | s->misses++; | |
551 | } | |
552 | ||
553 | /* | |
554 | * There are times when we don't have any confidence in the hotspot queue, | 
555 | * such as when a fresh cache is created and the blocks have been spread | 
556 | * out across the levels, or when the I/O load changes. We detect this by | 
557 | * seeing how often a lookup is in the top levels of the hotspot queue. | 
558 | */ | |
559 | static enum performance stats_assess(struct stats *s) | |
560 | { | |
86a3238c | 561 | unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses); |
66a63635 JT |
562 | |
563 | if (confidence < SIXTEENTH) | |
564 | return Q_POOR; | |
565 | ||
566 | else if (confidence < EIGHTH) | |
567 | return Q_FAIR; | |
568 | ||
569 | else | |
570 | return Q_WELL; | |
571 | } | |
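/*
 * Worked example added in editing (not part of the original file): the
 * confidence value is a fixed-point fraction scaled by 1 << FP_SHIFT
 * (256), so SIXTEENTH = 16 and EIGHTH = 32 correspond to hit ratios of
 * 1/16 and 1/8.  With hits = 10 and misses = 90, confidence =
 * (10 << 8) / 100 = 25, which falls between the two thresholds and is
 * assessed as Q_FAIR.
 */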
572 | ||
573 | /*----------------------------------------------------------------*/ | |
574 | ||
b29d4986 | 575 | struct smq_hash_table { |
66a63635 JT |
576 | struct entry_space *es; |
577 | unsigned long long hash_bits; | |
86a3238c | 578 | unsigned int *buckets; |
66a63635 JT |
579 | }; |
580 | ||
581 | /* | |
582 | * All cache entries are stored in a chained hash table. To save space we | |
583 | * use indexing again, and only store the index of the next entry. | 
584 | */ | |
86a3238c | 585 | static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries) |
66a63635 | 586 | { |
86a3238c | 587 | unsigned int i, nr_buckets; |
66a63635 JT |
588 | |
589 | ht->es = es; | |
590 | nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u)); | |
a3d939ae | 591 | ht->hash_bits = __ffs(nr_buckets); |
66a63635 | 592 | |
42bc47b3 | 593 | ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets))); |
66a63635 JT |
594 | if (!ht->buckets) |
595 | return -ENOMEM; | |
596 | ||
597 | for (i = 0; i < nr_buckets; i++) | |
598 | ht->buckets[i] = INDEXER_NULL; | |
599 | ||
600 | return 0; | |
601 | } | |
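/*
 * Worked example added in editing (not part of the original file): for
 * nr_entries = 10000 cache entries, h_init() sizes the table at
 * nr_buckets = roundup_pow_of_two(max(2500, 16)) = 4096 buckets, i.e.
 * hash_bits = 12, so on average a bucket chain holds two to three
 * entries.
 */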
602 | ||
b29d4986 | 603 | static void h_exit(struct smq_hash_table *ht) |
66a63635 JT |
604 | { |
605 | vfree(ht->buckets); | |
606 | } | |
607 | ||
86a3238c | 608 | static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket) |
66a63635 JT |
609 | { |
610 | return to_entry(ht->es, ht->buckets[bucket]); | |
611 | } | |
612 | ||
b29d4986 | 613 | static struct entry *h_next(struct smq_hash_table *ht, struct entry *e) |
66a63635 JT |
614 | { |
615 | return to_entry(ht->es, e->hash_next); | |
616 | } | |
617 | ||
86a3238c | 618 | static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e) |
66a63635 JT |
619 | { |
620 | e->hash_next = ht->buckets[bucket]; | |
621 | ht->buckets[bucket] = to_index(ht->es, e); | |
622 | } | |
623 | ||
b29d4986 | 624 | static void h_insert(struct smq_hash_table *ht, struct entry *e) |
66a63635 | 625 | { |
86a3238c | 626 | unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); |
66a63635 JT |
627 | __h_insert(ht, h, e); |
628 | } | |
629 | ||
86a3238c | 630 | static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock, |
66a63635 JT |
631 | struct entry **prev) |
632 | { | |
633 | struct entry *e; | |
634 | ||
635 | *prev = NULL; | |
636 | for (e = h_head(ht, h); e; e = h_next(ht, e)) { | |
637 | if (e->oblock == oblock) | |
638 | return e; | |
639 | ||
640 | *prev = e; | |
641 | } | |
642 | ||
643 | return NULL; | |
644 | } | |
645 | ||
86a3238c | 646 | static void __h_unlink(struct smq_hash_table *ht, unsigned int h, |
66a63635 JT |
647 | struct entry *e, struct entry *prev) |
648 | { | |
649 | if (prev) | |
650 | prev->hash_next = e->hash_next; | |
651 | else | |
652 | ht->buckets[h] = e->hash_next; | |
653 | } | |
654 | ||
655 | /* | |
656 | * Also moves the entry it finds to the front of the bucket. | 
657 | */ | |
b29d4986 | 658 | static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock) |
66a63635 JT |
659 | { |
660 | struct entry *e, *prev; | |
86a3238c | 661 | unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits); |
66a63635 JT |
662 | |
663 | e = __h_lookup(ht, h, oblock, &prev); | |
664 | if (e && prev) { | |
665 | /* | |
666 | * Move to the front because this entry is likely | |
667 | * to be hit again. | |
668 | */ | |
669 | __h_unlink(ht, h, e, prev); | |
670 | __h_insert(ht, h, e); | |
671 | } | |
672 | ||
673 | return e; | |
674 | } | |
675 | ||
b29d4986 | 676 | static void h_remove(struct smq_hash_table *ht, struct entry *e) |
66a63635 | 677 | { |
86a3238c | 678 | unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); |
66a63635 JT |
679 | struct entry *prev; |
680 | ||
681 | /* | |
682 | * The downside of using a singly-linked list is that we have to | 
683 | * iterate the bucket to remove an item. | |
684 | */ | |
685 | e = __h_lookup(ht, h, e->oblock, &prev); | |
686 | if (e) | |
687 | __h_unlink(ht, h, e, prev); | |
688 | } | |
689 | ||
690 | /*----------------------------------------------------------------*/ | |
691 | ||
692 | struct entry_alloc { | |
693 | struct entry_space *es; | |
86a3238c | 694 | unsigned int begin; |
66a63635 | 695 | |
86a3238c | 696 | unsigned int nr_allocated; |
66a63635 JT |
697 | struct ilist free; |
698 | }; | |
699 | ||
700 | static void init_allocator(struct entry_alloc *ea, struct entry_space *es, | |
86a3238c | 701 | unsigned int begin, unsigned int end) |
66a63635 | 702 | { |
86a3238c | 703 | unsigned int i; |
66a63635 JT |
704 | |
705 | ea->es = es; | |
706 | ea->nr_allocated = 0u; | |
707 | ea->begin = begin; | |
708 | ||
709 | l_init(&ea->free); | |
710 | for (i = begin; i != end; i++) | |
711 | l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i)); | |
712 | } | |
713 | ||
714 | static void init_entry(struct entry *e) | |
715 | { | |
716 | /* | |
717 | * We can't memset because that would clear the hotspot and | |
718 | * sentinel bits, which remain constant. | 
719 | */ | |
720 | e->hash_next = INDEXER_NULL; | |
721 | e->next = INDEXER_NULL; | |
722 | e->prev = INDEXER_NULL; | |
723 | e->level = 0u; | |
b29d4986 | 724 | e->dirty = true; /* FIXME: audit */ |
66a63635 | 725 | e->allocated = true; |
b29d4986 JT |
726 | e->sentinel = false; |
727 | e->pending_work = false; | |
66a63635 JT |
728 | } |
729 | ||
730 | static struct entry *alloc_entry(struct entry_alloc *ea) | |
731 | { | |
732 | struct entry *e; | |
733 | ||
734 | if (l_empty(&ea->free)) | |
735 | return NULL; | |
736 | ||
9768a10d | 737 | e = l_pop_head(ea->es, &ea->free); |
66a63635 JT |
738 | init_entry(e); |
739 | ea->nr_allocated++; | |
740 | ||
741 | return e; | |
742 | } | |
743 | ||
744 | /* | |
745 | * This assumes the cblock hasn't already been allocated. | |
746 | */ | |
86a3238c | 747 | static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i) |
66a63635 JT |
748 | { |
749 | struct entry *e = __get_entry(ea->es, ea->begin + i); | |
750 | ||
751 | BUG_ON(e->allocated); | |
752 | ||
753 | l_del(ea->es, &ea->free, e); | |
754 | init_entry(e); | |
755 | ea->nr_allocated++; | |
756 | ||
757 | return e; | |
758 | } | |
759 | ||
760 | static void free_entry(struct entry_alloc *ea, struct entry *e) | |
761 | { | |
762 | BUG_ON(!ea->nr_allocated); | |
763 | BUG_ON(!e->allocated); | |
764 | ||
765 | ea->nr_allocated--; | |
766 | e->allocated = false; | |
767 | l_add_tail(ea->es, &ea->free, e); | |
768 | } | |
769 | ||
770 | static bool allocator_empty(struct entry_alloc *ea) | |
771 | { | |
772 | return l_empty(&ea->free); | |
773 | } | |
774 | ||
86a3238c | 775 | static unsigned int get_index(struct entry_alloc *ea, struct entry *e) |
66a63635 JT |
776 | { |
777 | return to_index(ea->es, e) - ea->begin; | |
778 | } | |
779 | ||
86a3238c | 780 | static struct entry *get_entry(struct entry_alloc *ea, unsigned int index) |
66a63635 JT |
781 | { |
782 | return __get_entry(ea->es, ea->begin + index); | |
783 | } | |
784 | ||
785 | /*----------------------------------------------------------------*/ | |
786 | ||
787 | #define NR_HOTSPOT_LEVELS 64u | |
788 | #define NR_CACHE_LEVELS 64u | |
789 | ||
b29d4986 JT |
790 | #define WRITEBACK_PERIOD (10ul * HZ) |
791 | #define DEMOTE_PERIOD (60ul * HZ) | |
66a63635 JT |
792 | |
793 | #define HOTSPOT_UPDATE_PERIOD (HZ) | |
b29d4986 | 794 | #define CACHE_UPDATE_PERIOD (60ul * HZ) |
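/*
 * Editorial note (not part of the original file): HZ is the number of
 * jiffies per second, so the periods above work out to roughly 10s
 * between writeback sentinel rotations, 60s for demote sentinels, 1s for
 * hotspot queue updates and 60s for cache queue updates.
 */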
66a63635 JT |
795 | |
796 | struct smq_policy { | |
797 | struct dm_cache_policy policy; | |
798 | ||
799 | /* protects everything */ | |
4051aab7 | 800 | spinlock_t lock; |
66a63635 JT |
801 | dm_cblock_t cache_size; |
802 | sector_t cache_block_size; | |
803 | ||
804 | sector_t hotspot_block_size; | |
86a3238c HM |
805 | unsigned int nr_hotspot_blocks; |
806 | unsigned int cache_blocks_per_hotspot_block; | |
807 | unsigned int hotspot_level_jump; | |
66a63635 JT |
808 | |
809 | struct entry_space es; | |
810 | struct entry_alloc writeback_sentinel_alloc; | |
811 | struct entry_alloc demote_sentinel_alloc; | |
812 | struct entry_alloc hotspot_alloc; | |
813 | struct entry_alloc cache_alloc; | |
814 | ||
815 | unsigned long *hotspot_hit_bits; | |
816 | unsigned long *cache_hit_bits; | |
817 | ||
818 | /* | |
819 | * We maintain three queues of entries. The cache proper consists of | 
820 | * a clean and a dirty queue, which contain the currently active | 
821 | * mappings. The hotspot queue uses a larger block size to track | 
822 | * blocks that are being hit frequently, as potential candidates for | 
823 | * promotion to the cache. | 
824 | */ | |
825 | struct queue hotspot; | |
826 | struct queue clean; | |
827 | struct queue dirty; | |
828 | ||
829 | struct stats hotspot_stats; | |
830 | struct stats cache_stats; | |
831 | ||
832 | /* | |
833 | * Keeps track of time, incremented by the core. We use this to | |
834 | * avoid attributing multiple hits within the same tick. | |
66a63635 | 835 | */ |
86a3238c | 836 | unsigned int tick; |
66a63635 JT |
837 | |
838 | /* | |
839 | * The hash tables allow us to quickly find an entry by origin | 
840 | * block. | |
841 | */ | |
b29d4986 JT |
842 | struct smq_hash_table table; |
843 | struct smq_hash_table hotspot_table; | |
66a63635 JT |
844 | |
845 | bool current_writeback_sentinels; | |
846 | unsigned long next_writeback_period; | |
847 | ||
848 | bool current_demote_sentinels; | |
849 | unsigned long next_demote_period; | |
850 | ||
86a3238c HM |
851 | unsigned int write_promote_level; |
852 | unsigned int read_promote_level; | |
66a63635 JT |
853 | |
854 | unsigned long next_hotspot_period; | |
855 | unsigned long next_cache_period; | |
b29d4986 JT |
856 | |
857 | struct background_tracker *bg_work; | |
858 | ||
859 | bool migrations_allowed; | |
66a63635 JT |
860 | }; |
861 | ||
862 | /*----------------------------------------------------------------*/ | |
863 | ||
86a3238c | 864 | static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which) |
66a63635 JT |
865 | { |
866 | return get_entry(ea, which ? level : NR_CACHE_LEVELS + level); | |
867 | } | |
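/*
 * Editorial note (not part of the original file): each sentinel allocator
 * is initialised with 2 * NR_CACHE_LEVELS = 128 entries (see
 * __smq_create() below), one sentinel per level for each of two
 * alternating sets.  The 'which' flag selects a set, so update_sentinels()
 * can switch to the other set each WRITEBACK_PERIOD / DEMOTE_PERIOD
 * without reallocating anything.
 */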
868 | ||
86a3238c | 869 | static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level) |
66a63635 JT |
870 | { |
871 | return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); | |
872 | } | |
873 | ||
86a3238c | 874 | static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level) |
66a63635 JT |
875 | { |
876 | return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); | |
877 | } | |
878 | ||
879 | static void __update_writeback_sentinels(struct smq_policy *mq) | |
880 | { | |
86a3238c | 881 | unsigned int level; |
66a63635 JT |
882 | struct queue *q = &mq->dirty; |
883 | struct entry *sentinel; | |
884 | ||
885 | for (level = 0; level < q->nr_levels; level++) { | |
886 | sentinel = writeback_sentinel(mq, level); | |
887 | q_del(q, sentinel); | |
888 | q_push(q, sentinel); | |
889 | } | |
890 | } | |
891 | ||
892 | static void __update_demote_sentinels(struct smq_policy *mq) | |
893 | { | |
86a3238c | 894 | unsigned int level; |
66a63635 JT |
895 | struct queue *q = &mq->clean; |
896 | struct entry *sentinel; | |
897 | ||
898 | for (level = 0; level < q->nr_levels; level++) { | |
899 | sentinel = demote_sentinel(mq, level); | |
900 | q_del(q, sentinel); | |
901 | q_push(q, sentinel); | |
902 | } | |
903 | } | |
904 | ||
905 | static void update_sentinels(struct smq_policy *mq) | |
906 | { | |
907 | if (time_after(jiffies, mq->next_writeback_period)) { | |
66a63635 JT |
908 | mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; |
909 | mq->current_writeback_sentinels = !mq->current_writeback_sentinels; | |
b29d4986 | 910 | __update_writeback_sentinels(mq); |
66a63635 JT |
911 | } |
912 | ||
913 | if (time_after(jiffies, mq->next_demote_period)) { | |
66a63635 JT |
914 | mq->next_demote_period = jiffies + DEMOTE_PERIOD; |
915 | mq->current_demote_sentinels = !mq->current_demote_sentinels; | |
b29d4986 | 916 | __update_demote_sentinels(mq); |
66a63635 JT |
917 | } |
918 | } | |
919 | ||
920 | static void __sentinels_init(struct smq_policy *mq) | |
921 | { | |
86a3238c | 922 | unsigned int level; |
66a63635 JT |
923 | struct entry *sentinel; |
924 | ||
925 | for (level = 0; level < NR_CACHE_LEVELS; level++) { | |
926 | sentinel = writeback_sentinel(mq, level); | |
927 | sentinel->level = level; | |
928 | q_push(&mq->dirty, sentinel); | |
929 | ||
930 | sentinel = demote_sentinel(mq, level); | |
931 | sentinel->level = level; | |
932 | q_push(&mq->clean, sentinel); | |
933 | } | |
934 | } | |
935 | ||
936 | static void sentinels_init(struct smq_policy *mq) | |
937 | { | |
938 | mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; | |
939 | mq->next_demote_period = jiffies + DEMOTE_PERIOD; | |
940 | ||
941 | mq->current_writeback_sentinels = false; | |
942 | mq->current_demote_sentinels = false; | |
943 | __sentinels_init(mq); | |
944 | ||
945 | mq->current_writeback_sentinels = !mq->current_writeback_sentinels; | |
946 | mq->current_demote_sentinels = !mq->current_demote_sentinels; | |
947 | __sentinels_init(mq); | |
948 | } | |
949 | ||
950 | /*----------------------------------------------------------------*/ | |
951 | ||
b29d4986 | 952 | static void del_queue(struct smq_policy *mq, struct entry *e) |
66a63635 | 953 | { |
b29d4986 | 954 | q_del(e->dirty ? &mq->dirty : &mq->clean, e); |
66a63635 JT |
955 | } |
956 | ||
b29d4986 | 957 | static void push_queue(struct smq_policy *mq, struct entry *e) |
66a63635 | 958 | { |
b29d4986 JT |
959 | if (e->dirty) |
960 | q_push(&mq->dirty, e); | |
961 | else | |
962 | q_push(&mq->clean, e); | |
66a63635 JT |
963 | } |
964 | ||
b29d4986 JT |
965 | // !h, !q, a -> h, q, a |
966 | static void push(struct smq_policy *mq, struct entry *e) | |
66a63635 | 967 | { |
b29d4986 JT |
968 | h_insert(&mq->table, e); |
969 | if (!e->pending_work) | |
970 | push_queue(mq, e); | |
66a63635 JT |
971 | } |
972 | ||
b29d4986 | 973 | static void push_queue_front(struct smq_policy *mq, struct entry *e) |
66a63635 | 974 | { |
b29d4986 JT |
975 | if (e->dirty) |
976 | q_push_front(&mq->dirty, e); | |
977 | else | |
978 | q_push_front(&mq->clean, e); | |
66a63635 JT |
979 | } |
980 | ||
b29d4986 | 981 | static void push_front(struct smq_policy *mq, struct entry *e) |
66a63635 | 982 | { |
b29d4986 JT |
983 | h_insert(&mq->table, e); |
984 | if (!e->pending_work) | |
985 | push_queue_front(mq, e); | |
66a63635 JT |
986 | } |
987 | ||
988 | static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) | |
989 | { | |
990 | return to_cblock(get_index(&mq->cache_alloc, e)); | |
991 | } | |
992 | ||
993 | static void requeue(struct smq_policy *mq, struct entry *e) | |
994 | { | |
b29d4986 JT |
995 | /* |
996 | * Pending work has temporarily been taken out of the queues. | |
997 | */ | |
998 | if (e->pending_work) | |
999 | return; | |
66a63635 JT |
1000 | |
1001 | if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { | |
b29d4986 JT |
1002 | if (!e->dirty) { |
1003 | q_requeue(&mq->clean, e, 1u, NULL, NULL); | |
1004 | return; | |
66a63635 | 1005 | } |
b29d4986 JT |
1006 | |
1007 | q_requeue(&mq->dirty, e, 1u, | |
1008 | get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), | |
1009 | get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); | |
66a63635 JT |
1010 | } |
1011 | } | |
1012 | ||
86a3238c | 1013 | static unsigned int default_promote_level(struct smq_policy *mq) |
66a63635 JT |
1014 | { |
1015 | /* | |
1016 | * The promote level depends on the current performance of the | |
1017 | * cache. | |
1018 | * | |
1019 | * If the cache is performing badly, then we can't afford | |
1020 | * to promote much without causing performance to drop below that | |
1021 | * of the origin device. | |
1022 | * | |
1023 | * If the cache is performing well, then we don't need to promote | |
1024 | * much. If it isn't broken, don't fix it. | |
1025 | * | |
1026 | * If the cache is middling then we promote more. | |
1027 | * | |
1028 | * This scheme reminds me of a graph of entropy vs probability of a | |
1029 | * binary variable. | |
1030 | */ | |
302f0351 CIK |
1031 | static const unsigned int table[] = { |
1032 | 1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1 | |
1033 | }; | |
66a63635 | 1034 | |
86a3238c HM |
1035 | unsigned int hits = mq->cache_stats.hits; |
1036 | unsigned int misses = mq->cache_stats.misses; | |
1037 | unsigned int index = safe_div(hits << 4u, hits + misses); | |
66a63635 JT |
1038 | return table[index]; |
1039 | } | |
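/*
 * Worked example added in editing (not part of the original file): the
 * index is the hit ratio scaled to 0..16 (hits << 4 / (hits + misses)),
 * so the 17-entry table is peaked in the middle.  With hits = misses the
 * index is 8 and the promote level is 7; with a 95% hit ratio the index
 * is 15 and the level drops to 2; with no hits at all the index is 0 and
 * the level is 1.
 */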
1040 | ||
1041 | static void update_promote_levels(struct smq_policy *mq) | |
1042 | { | |
1043 | /* | |
1044 | * If there are unused cache entries then we want to be really | |
1045 | * eager to promote. | |
1046 | */ | |
86a3238c | 1047 | unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ? |
66a63635 JT |
1048 | default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u); |
1049 | ||
b29d4986 JT |
1050 | threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS); |
1051 | ||
66a63635 JT |
1052 | /* |
1053 | * If the hotspot queue is performing badly then we have little | |
1054 | * confidence that we know which blocks to promote. So we cut down | |
1055 | * on the number of promotions. | 
1056 | */ | |
1057 | switch (stats_assess(&mq->hotspot_stats)) { | |
1058 | case Q_POOR: | |
1059 | threshold_level /= 4u; | |
1060 | break; | |
1061 | ||
1062 | case Q_FAIR: | |
1063 | threshold_level /= 2u; | |
1064 | break; | |
1065 | ||
1066 | case Q_WELL: | |
1067 | break; | |
1068 | } | |
1069 | ||
1070 | mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; | |
b29d4986 | 1071 | mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level); |
66a63635 JT |
1072 | } |
1073 | ||
1074 | /* | |
1075 | * If the hotspot queue is performing badly, then we try to move entries | 
1076 | * around more quickly. | |
1077 | */ | |
1078 | static void update_level_jump(struct smq_policy *mq) | |
1079 | { | |
1080 | switch (stats_assess(&mq->hotspot_stats)) { | |
1081 | case Q_POOR: | |
1082 | mq->hotspot_level_jump = 4u; | |
1083 | break; | |
1084 | ||
1085 | case Q_FAIR: | |
1086 | mq->hotspot_level_jump = 2u; | |
1087 | break; | |
1088 | ||
1089 | case Q_WELL: | |
1090 | mq->hotspot_level_jump = 1u; | |
1091 | break; | |
1092 | } | |
1093 | } | |
1094 | ||
1095 | static void end_hotspot_period(struct smq_policy *mq) | |
1096 | { | |
1097 | clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); | |
1098 | update_promote_levels(mq); | |
1099 | ||
1100 | if (time_after(jiffies, mq->next_hotspot_period)) { | |
1101 | update_level_jump(mq); | |
1102 | q_redistribute(&mq->hotspot); | |
1103 | stats_reset(&mq->hotspot_stats); | |
1104 | mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; | |
1105 | } | |
1106 | } | |
1107 | ||
1108 | static void end_cache_period(struct smq_policy *mq) | |
1109 | { | |
1110 | if (time_after(jiffies, mq->next_cache_period)) { | |
1111 | clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); | |
1112 | ||
1113 | q_redistribute(&mq->dirty); | |
1114 | q_redistribute(&mq->clean); | |
1115 | stats_reset(&mq->cache_stats); | |
1116 | ||
1117 | mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; | |
1118 | } | |
1119 | } | |
1120 | ||
b29d4986 JT |
1121 | /*----------------------------------------------------------------*/ |
1122 | ||
1123 | /* | |
1124 | * Targets are given as a percentage. | |
1125 | */ | |
1126 | #define CLEAN_TARGET 25u | |
1127 | #define FREE_TARGET 25u | |
1128 | ||
86a3238c | 1129 | static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p) |
66a63635 | 1130 | { |
b29d4986 JT |
1131 | return from_cblock(mq->cache_size) * p / 100u; |
1132 | } | |
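/*
 * Worked example added in editing (not part of the original file): with a
 * cache of 10000 blocks and FREE_TARGET = 25, percent_to_target() returns
 * 2500, so free_target_met() is satisfied once free cblocks plus queued
 * demotions reach a quarter of the cache.
 */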
1133 | ||
1134 | static bool clean_target_met(struct smq_policy *mq, bool idle) | |
1135 | { | |
1136 | /* | |
1137 | * Cache entries may not be populated. So we cannot rely on the | |
1138 | * size of the clean queue. | |
1139 | */ | |
97dfb203 | 1140 | if (idle) { |
66a63635 | 1141 | /* |
b29d4986 | 1142 | * We'd like to clean everything. |
66a63635 | 1143 | */ |
b29d4986 | 1144 | return q_size(&mq->dirty) == 0u; |
97dfb203 MS |
1145 | } |
1146 | ||
2e633095 JT |
1147 | /* |
1148 | * If we're busy we don't worry about cleaning at all. | |
1149 | */ | |
1150 | return true; | |
b29d4986 | 1151 | } |
66a63635 | 1152 | |
6cf4cc8f | 1153 | static bool free_target_met(struct smq_policy *mq) |
b29d4986 | 1154 | { |
86a3238c | 1155 | unsigned int nr_free; |
66a63635 | 1156 | |
97dfb203 MS |
1157 | nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; |
1158 | return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= | |
1159 | percent_to_target(mq, FREE_TARGET); | |
66a63635 JT |
1160 | } |
1161 | ||
b29d4986 JT |
1162 | /*----------------------------------------------------------------*/ |
1163 | ||
1164 | static void mark_pending(struct smq_policy *mq, struct entry *e) | |
1165 | { | |
1166 | BUG_ON(e->sentinel); | |
1167 | BUG_ON(!e->allocated); | |
1168 | BUG_ON(e->pending_work); | |
1169 | e->pending_work = true; | |
1170 | } | |
1171 | ||
1172 | static void clear_pending(struct smq_policy *mq, struct entry *e) | |
1173 | { | |
1174 | BUG_ON(!e->pending_work); | |
1175 | e->pending_work = false; | |
1176 | } | |
1177 | ||
deb71918 | 1178 | static void queue_writeback(struct smq_policy *mq, bool idle) |
b29d4986 JT |
1179 | { |
1180 | int r; | |
1181 | struct policy_work work; | |
1182 | struct entry *e; | |
1183 | ||
deb71918 | 1184 | e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); |
b29d4986 JT |
1185 | if (e) { |
1186 | mark_pending(mq, e); | |
1187 | q_del(&mq->dirty, e); | |
1188 | ||
1189 | work.op = POLICY_WRITEBACK; | |
1190 | work.oblock = e->oblock; | |
1191 | work.cblock = infer_cblock(mq, e); | |
1192 | ||
1193 | r = btracker_queue(mq->bg_work, &work, NULL); | |
1e72a8e8 JT |
1194 | if (r) { |
1195 | clear_pending(mq, e); | |
1196 | q_push_front(&mq->dirty, e); | |
1197 | } | |
b29d4986 JT |
1198 | } |
1199 | } | |
1200 | ||
1201 | static void queue_demotion(struct smq_policy *mq) | |
1202 | { | |
1e72a8e8 | 1203 | int r; |
b29d4986 JT |
1204 | struct policy_work work; |
1205 | struct entry *e; | |
1206 | ||
bab5d988 | 1207 | if (WARN_ON_ONCE(!mq->migrations_allowed)) |
b29d4986 JT |
1208 | return; |
1209 | ||
a8cd1eba | 1210 | e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); |
b29d4986 | 1211 | if (!e) { |
78c45607 | 1212 | if (!clean_target_met(mq, true)) |
deb71918 | 1213 | queue_writeback(mq, false); |
b29d4986 JT |
1214 | return; |
1215 | } | |
1216 | ||
1217 | mark_pending(mq, e); | |
1218 | q_del(&mq->clean, e); | |
1219 | ||
1220 | work.op = POLICY_DEMOTE; | |
1221 | work.oblock = e->oblock; | |
1222 | work.cblock = infer_cblock(mq, e); | |
1e72a8e8 JT |
1223 | r = btracker_queue(mq->bg_work, &work, NULL); |
1224 | if (r) { | |
1225 | clear_pending(mq, e); | |
1226 | q_push_front(&mq->clean, e); | |
1227 | } | |
b29d4986 JT |
1228 | } |
1229 | ||
1230 | static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock, | |
1231 | struct policy_work **workp) | |
1232 | { | |
1e72a8e8 | 1233 | int r; |
b29d4986 JT |
1234 | struct entry *e; |
1235 | struct policy_work work; | |
1236 | ||
1237 | if (!mq->migrations_allowed) | |
1238 | return; | |
1239 | ||
1240 | if (allocator_empty(&mq->cache_alloc)) { | |
ce1d64e8 JT |
1241 | /* |
1242 | * We always claim to be 'idle' to ensure some demotions happen | |
1243 | * with continuous loads. | |
1244 | */ | |
6cf4cc8f | 1245 | if (!free_target_met(mq)) |
b29d4986 JT |
1246 | queue_demotion(mq); |
1247 | return; | |
1248 | } | |
1249 | ||
1250 | if (btracker_promotion_already_present(mq->bg_work, oblock)) | |
1251 | return; | |
1252 | ||
1253 | /* | |
1254 | * We allocate the entry now to reserve the cblock. If the | |
1255 | * background work is aborted we must remember to free it. | |
1256 | */ | |
1257 | e = alloc_entry(&mq->cache_alloc); | |
1258 | BUG_ON(!e); | |
1259 | e->pending_work = true; | |
1260 | work.op = POLICY_PROMOTE; | |
1261 | work.oblock = oblock; | |
1262 | work.cblock = infer_cblock(mq, e); | |
1e72a8e8 JT |
1263 | r = btracker_queue(mq->bg_work, &work, workp); |
1264 | if (r) | |
1265 | free_entry(&mq->cache_alloc, e); | |
b29d4986 JT |
1266 | } |
1267 | ||
1268 | /*----------------------------------------------------------------*/ | |
1269 | ||
66a63635 JT |
1270 | enum promote_result { |
1271 | PROMOTE_NOT, | |
1272 | PROMOTE_TEMPORARY, | |
1273 | PROMOTE_PERMANENT | |
1274 | }; | |
1275 | ||
1276 | /* | |
1277 | * Converts a boolean into a promote result. | |
1278 | */ | |
1279 | static enum promote_result maybe_promote(bool promote) | |
1280 | { | |
1281 | return promote ? PROMOTE_PERMANENT : PROMOTE_NOT; | |
1282 | } | |
1283 | ||
b29d4986 JT |
1284 | static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, |
1285 | int data_dir, bool fast_promote) | |
66a63635 | 1286 | { |
b29d4986 | 1287 | if (data_dir == WRITE) { |
66a63635 JT |
1288 | if (!allocator_empty(&mq->cache_alloc) && fast_promote) |
1289 | return PROMOTE_TEMPORARY; | |
1290 | ||
b29d4986 | 1291 | return maybe_promote(hs_e->level >= mq->write_promote_level); |
66a63635 JT |
1292 | } else |
1293 | return maybe_promote(hs_e->level >= mq->read_promote_level); | |
1294 | } | |
1295 | ||
66a63635 JT |
1296 | static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b) |
1297 | { | |
1298 | sector_t r = from_oblock(b); | |
1299 | (void) sector_div(r, mq->cache_blocks_per_hotspot_block); | |
1300 | return to_oblock(r); | |
1301 | } | |
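/*
 * Editorial note (not part of the original file): to_hblock() maps an
 * origin block to the hotspot block that contains it.  With the default
 * sizing (a hotspot block spans 16 cache blocks, see
 * calc_hotspot_params()), origin blocks 0-15 fall into hotspot block 0,
 * blocks 16-31 into hotspot block 1, and so on.
 */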
1302 | ||
b29d4986 | 1303 | static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b) |
66a63635 | 1304 | { |
86a3238c | 1305 | unsigned int hi; |
66a63635 JT |
1306 | dm_oblock_t hb = to_hblock(mq, b); |
1307 | struct entry *e = h_lookup(&mq->hotspot_table, hb); | |
1308 | ||
1309 | if (e) { | |
1310 | stats_level_accessed(&mq->hotspot_stats, e->level); | |
1311 | ||
1312 | hi = get_index(&mq->hotspot_alloc, e); | |
1313 | q_requeue(&mq->hotspot, e, | |
1314 | test_and_set_bit(hi, mq->hotspot_hit_bits) ? | |
b29d4986 JT |
1315 | 0u : mq->hotspot_level_jump, |
1316 | NULL, NULL); | |
66a63635 JT |
1317 | |
1318 | } else { | |
1319 | stats_miss(&mq->hotspot_stats); | |
1320 | ||
1321 | e = alloc_entry(&mq->hotspot_alloc); | |
1322 | if (!e) { | |
1323 | e = q_pop(&mq->hotspot); | |
1324 | if (e) { | |
1325 | h_remove(&mq->hotspot_table, e); | |
1326 | hi = get_index(&mq->hotspot_alloc, e); | |
1327 | clear_bit(hi, mq->hotspot_hit_bits); | |
1328 | } | |
1329 | ||
1330 | } | |
1331 | ||
1332 | if (e) { | |
1333 | e->oblock = hb; | |
1334 | q_push(&mq->hotspot, e); | |
1335 | h_insert(&mq->hotspot_table, e); | |
1336 | } | |
1337 | } | |
1338 | ||
1339 | return e; | |
1340 | } | |
1341 | ||
66a63635 JT |
1342 | /*----------------------------------------------------------------*/ |
1343 | ||
1344 | /* | |
1345 | * Public interface, via the policy struct. See dm-cache-policy.h for a | |
1346 | * description of these. | |
1347 | */ | |
1348 | ||
1349 | static struct smq_policy *to_smq_policy(struct dm_cache_policy *p) | |
1350 | { | |
1351 | return container_of(p, struct smq_policy, policy); | |
1352 | } | |
1353 | ||
1354 | static void smq_destroy(struct dm_cache_policy *p) | |
1355 | { | |
1356 | struct smq_policy *mq = to_smq_policy(p); | |
1357 | ||
b29d4986 | 1358 | btracker_destroy(mq->bg_work); |
66a63635 JT |
1359 | h_exit(&mq->hotspot_table); |
1360 | h_exit(&mq->table); | |
1361 | free_bitset(mq->hotspot_hit_bits); | |
1362 | free_bitset(mq->cache_hit_bits); | |
1363 | space_exit(&mq->es); | |
1364 | kfree(mq); | |
1365 | } | |
1366 | ||
b29d4986 | 1367 | /*----------------------------------------------------------------*/ |
66a63635 | 1368 | |
b29d4986 JT |
1369 | static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock, |
1370 | int data_dir, bool fast_copy, | |
1371 | struct policy_work **work, bool *background_work) | |
66a63635 | 1372 | { |
b29d4986 JT |
1373 | struct entry *e, *hs_e; |
1374 | enum promote_result pr; | |
1375 | ||
1376 | *background_work = false; | |
66a63635 | 1377 | |
66a63635 JT |
1378 | e = h_lookup(&mq->table, oblock); |
1379 | if (e) { | |
b29d4986 JT |
1380 | stats_level_accessed(&mq->cache_stats, e->level); |
1381 | ||
1382 | requeue(mq, e); | |
66a63635 | 1383 | *cblock = infer_cblock(mq, e); |
b29d4986 | 1384 | return 0; |
66a63635 | 1385 | |
b29d4986 JT |
1386 | } else { |
1387 | stats_miss(&mq->cache_stats); | |
66a63635 | 1388 | |
b29d4986 JT |
1389 | /* |
1390 | * The hotspot queue only gets updated with misses. | |
1391 | */ | |
1392 | hs_e = update_hotspot_queue(mq, oblock); | |
66a63635 | 1393 | |
b29d4986 JT |
1394 | pr = should_promote(mq, hs_e, data_dir, fast_copy); |
1395 | if (pr != PROMOTE_NOT) { | |
1396 | queue_promotion(mq, oblock, work); | |
1397 | *background_work = true; | |
1398 | } | |
66a63635 | 1399 | |
b29d4986 JT |
1400 | return -ENOENT; |
1401 | } | |
66a63635 JT |
1402 | } |
1403 | ||
b29d4986 JT |
1404 | static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock, |
1405 | int data_dir, bool fast_copy, | |
1406 | bool *background_work) | |
66a63635 | 1407 | { |
b29d4986 | 1408 | int r; |
4051aab7 | 1409 | unsigned long flags; |
66a63635 JT |
1410 | struct smq_policy *mq = to_smq_policy(p); |
1411 | ||
4051aab7 | 1412 | spin_lock_irqsave(&mq->lock, flags); |
b29d4986 JT |
1413 | r = __lookup(mq, oblock, cblock, |
1414 | data_dir, fast_copy, | |
1415 | NULL, background_work); | |
4051aab7 | 1416 | spin_unlock_irqrestore(&mq->lock, flags); |
b29d4986 JT |
1417 | |
1418 | return r; | |
66a63635 JT |
1419 | } |
1420 | ||
b29d4986 JT |
1421 | static int smq_lookup_with_work(struct dm_cache_policy *p, |
1422 | dm_oblock_t oblock, dm_cblock_t *cblock, | |
1423 | int data_dir, bool fast_copy, | |
1424 | struct policy_work **work) | |
66a63635 | 1425 | { |
b29d4986 JT |
1426 | int r; |
1427 | bool background_queued; | |
4051aab7 | 1428 | unsigned long flags; |
b29d4986 | 1429 | struct smq_policy *mq = to_smq_policy(p); |
66a63635 | 1430 | |
4051aab7 | 1431 | spin_lock_irqsave(&mq->lock, flags); |
b29d4986 | 1432 | r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued); |
4051aab7 | 1433 | spin_unlock_irqrestore(&mq->lock, flags); |
66a63635 | 1434 | |
b29d4986 | 1435 | return r; |
9d1b404c JT |
1436 | } |
1437 | ||
b29d4986 JT |
1438 | static int smq_get_background_work(struct dm_cache_policy *p, bool idle, |
1439 | struct policy_work **result) | |
66a63635 | 1440 | { |
b29d4986 JT |
1441 | int r; |
1442 | unsigned long flags; | |
66a63635 | 1443 | struct smq_policy *mq = to_smq_policy(p); |
66a63635 | 1444 | |
b29d4986 JT |
1445 | spin_lock_irqsave(&mq->lock, flags); |
1446 | r = btracker_issue(mq->bg_work, result); | |
1447 | if (r == -ENODATA) { | |
6cf4cc8f | 1448 | if (!clean_target_met(mq, idle)) { |
deb71918 | 1449 | queue_writeback(mq, idle); |
6cf4cc8f JT |
1450 | r = btracker_issue(mq->bg_work, result); |
1451 | } | |
b29d4986 JT |
1452 | } |
1453 | spin_unlock_irqrestore(&mq->lock, flags); | |
66a63635 | 1454 | |
b29d4986 | 1455 | return r; |
66a63635 JT |
1456 | } |
1457 | ||
b29d4986 JT |
1458 | /* |
1459 | * We need to clear any pending work flags that have been set, and in the | |
1460 | * case of promotion, free the entry for the destination cblock. | 
1461 | */ | |
1462 | static void __complete_background_work(struct smq_policy *mq, | |
1463 | struct policy_work *work, | |
1464 | bool success) | |
1465 | { | |
1466 | struct entry *e = get_entry(&mq->cache_alloc, | |
1467 | from_cblock(work->cblock)); | |
1468 | ||
1469 | switch (work->op) { | |
1470 | case POLICY_PROMOTE: | |
1471 | // !h, !q, a | |
1472 | clear_pending(mq, e); | |
1473 | if (success) { | |
1474 | e->oblock = work->oblock; | |
4d44ec5a | 1475 | e->level = NR_CACHE_LEVELS - 1; |
b29d4986 JT |
1476 | push(mq, e); |
1477 | // h, q, a | |
1478 | } else { | |
1479 | free_entry(&mq->cache_alloc, e); | |
1480 | // !h, !q, !a | |
1481 | } | |
1482 | break; | |
66a63635 | 1483 | |
b29d4986 JT |
1484 | case POLICY_DEMOTE: |
1485 | // h, !q, a | |
1486 | if (success) { | |
1487 | h_remove(&mq->table, e); | |
1488 | free_entry(&mq->cache_alloc, e); | |
1489 | // !h, !q, !a | |
1490 | } else { | |
1491 | clear_pending(mq, e); | |
1492 | push_queue(mq, e); | |
1493 | // h, q, a | |
1494 | } | |
1495 | break; | |
66a63635 | 1496 | |
b29d4986 JT |
1497 | case POLICY_WRITEBACK: |
1498 | // h, !q, a | |
1499 | clear_pending(mq, e); | |
1500 | push_queue(mq, e); | |
1501 | // h, q, a | |
1502 | break; | |
1503 | } | |
1504 | ||
1505 | btracker_complete(mq->bg_work, work); | |
66a63635 JT |
1506 | } |
1507 | ||
b29d4986 JT |
1508 | static void smq_complete_background_work(struct dm_cache_policy *p, |
1509 | struct policy_work *work, | |
1510 | bool success) | |
66a63635 | 1511 | { |
4051aab7 | 1512 | unsigned long flags; |
b29d4986 | 1513 | struct smq_policy *mq = to_smq_policy(p); |
66a63635 | 1514 | |
4051aab7 | 1515 | spin_lock_irqsave(&mq->lock, flags); |
b29d4986 | 1516 | __complete_background_work(mq, work, success); |
4051aab7 | 1517 | spin_unlock_irqrestore(&mq->lock, flags); |
66a63635 JT |
1518 | } |
1519 | ||
b29d4986 JT |
1520 | // in_hash(oblock) -> in_hash(oblock) |
1521 | static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set) | |
66a63635 JT |
1522 | { |
1523 | struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); | |
1524 | ||
b29d4986 JT |
1525 | if (e->pending_work) |
1526 | e->dirty = set; | |
1527 | else { | |
1528 | del_queue(mq, e); | |
1529 | e->dirty = set; | |
1530 | push_queue(mq, e); | |
1531 | } | |
66a63635 JT |
1532 | } |
1533 | ||
b29d4986 | 1534 | static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock) |
66a63635 | 1535 | { |
4051aab7 | 1536 | unsigned long flags; |
66a63635 JT |
1537 | struct smq_policy *mq = to_smq_policy(p); |
1538 | ||
4051aab7 | 1539 | spin_lock_irqsave(&mq->lock, flags); |
b29d4986 | 1540 | __smq_set_clear_dirty(mq, cblock, true); |
4051aab7 | 1541 | spin_unlock_irqrestore(&mq->lock, flags); |
66a63635 JT |
1542 | } |
1543 | ||
b29d4986 | 1544 | static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock) |
66a63635 | 1545 | { |
b29d4986 JT |
1546 | struct smq_policy *mq = to_smq_policy(p); |
1547 | unsigned long flags; | |
66a63635 | 1548 | |
b29d4986 JT |
1549 | spin_lock_irqsave(&mq->lock, flags); |
1550 | __smq_set_clear_dirty(mq, cblock, false); | |
1551 | spin_unlock_irqrestore(&mq->lock, flags); | |
66a63635 JT |
1552 | } |
1553 | ||
86a3238c | 1554 | static unsigned int random_level(dm_cblock_t cblock) |
66a63635 | 1555 | { |
b29d4986 JT |
1556 | return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1); |
1557 | } | |
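/*
 * Editorial note (not part of the original file): hash_32(..., 9) yields
 * a pseudo-random 9-bit value which is then masked down to 0..63
 * (NR_CACHE_LEVELS - 1), so mappings loaded without a valid hint are
 * scattered across the cache levels instead of all landing in level 0.
 */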
66a63635 | 1558 | |
b29d4986 JT |
1559 | static int smq_load_mapping(struct dm_cache_policy *p, |
1560 | dm_oblock_t oblock, dm_cblock_t cblock, | |
1561 | bool dirty, uint32_t hint, bool hint_valid) | |
1562 | { | |
1563 | struct smq_policy *mq = to_smq_policy(p); | |
1564 | struct entry *e; | |
66a63635 | 1565 | |
b29d4986 JT |
1566 | e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); |
1567 | e->oblock = oblock; | |
1568 | e->dirty = dirty; | |
1569 | e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock); | |
1570 | e->pending_work = false; | |
66a63635 | 1571 | |
b29d4986 JT |
1572 | /* |
1573 | * When we load mappings we push ahead of both sentinels in order to | |
1574 | * allow demotions and cleaning to occur immediately. | |
1575 | */ | |
1576 | push_front(mq, e); | |
66a63635 JT |
1577 | |
1578 | return 0; | |
1579 | } | |
1580 | ||
b29d4986 | 1581 | static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock) |
66a63635 | 1582 | { |
66a63635 | 1583 | struct smq_policy *mq = to_smq_policy(p); |
b29d4986 | 1584 | struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); |
66a63635 | 1585 | |
b29d4986 JT |
1586 | if (!e->allocated) |
1587 | return -ENODATA; | |
66a63635 | 1588 | |
b29d4986 JT |
1589 | // FIXME: what if this block has pending background work? |
1590 | del_queue(mq, e); | |
1591 | h_remove(&mq->table, e); | |
1592 | free_entry(&mq->cache_alloc, e); | |
1593 | return 0; | |
66a63635 JT |
1594 | } |
1595 | ||
b29d4986 | 1596 | static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock) |
66a63635 JT |
1597 | { |
1598 | struct smq_policy *mq = to_smq_policy(p); | |
b29d4986 | 1599 | struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); |
66a63635 | 1600 | |
b29d4986 JT |
1601 | if (!e->allocated) |
1602 | return 0; | |
1603 | ||
1604 | return e->level; | |
66a63635 JT |
1605 | } |
1606 | ||
1607 | static dm_cblock_t smq_residency(struct dm_cache_policy *p) | |
1608 | { | |
1609 | dm_cblock_t r; | |
4051aab7 | 1610 | unsigned long flags; |
66a63635 JT |
1611 | struct smq_policy *mq = to_smq_policy(p); |
1612 | ||
4051aab7 | 1613 | spin_lock_irqsave(&mq->lock, flags); |
66a63635 | 1614 | r = to_cblock(mq->cache_alloc.nr_allocated); |
4051aab7 | 1615 | spin_unlock_irqrestore(&mq->lock, flags); |
66a63635 JT |
1616 | |
1617 | return r; | |
1618 | } | |
1619 | ||
fba10109 | 1620 | static void smq_tick(struct dm_cache_policy *p, bool can_block) |
66a63635 JT |
1621 | { |
1622 | struct smq_policy *mq = to_smq_policy(p); | |
1623 | unsigned long flags; | |
1624 | ||
4051aab7 JT |
1625 | spin_lock_irqsave(&mq->lock, flags); |
1626 | mq->tick++; | |
1627 | update_sentinels(mq); | |
1628 | end_hotspot_period(mq); | |
1629 | end_cache_period(mq); | |
1630 | spin_unlock_irqrestore(&mq->lock, flags); | |
66a63635 JT |
1631 | } |
1632 | ||
b29d4986 JT |
1633 | static void smq_allow_migrations(struct dm_cache_policy *p, bool allow) |
1634 | { | |
1635 | struct smq_policy *mq = to_smq_policy(p); | |
1636 | mq->migrations_allowed = allow; | |
1637 | } | |
1638 | ||
9ed84698 JT |
1639 | /* |
1640 | * smq has no config values, but the old mq policy did. To avoid breaking | |
1641 | * software we continue to accept these configurables for the mq policy, | |
1642 | * but they have no effect. | |
1643 | */ | |
1644 | static int mq_set_config_value(struct dm_cache_policy *p, | |
1645 | const char *key, const char *value) | |
1646 | { | |
1647 | unsigned long tmp; | |
1648 | ||
1649 | if (kstrtoul(value, 10, &tmp)) | |
1650 | return -EINVAL; | |
1651 | ||
1652 | if (!strcasecmp(key, "random_threshold") || | |
1653 | !strcasecmp(key, "sequential_threshold") || | |
1654 | !strcasecmp(key, "discard_promote_adjustment") || | |
1655 | !strcasecmp(key, "read_promote_adjustment") || | |
1656 | !strcasecmp(key, "write_promote_adjustment")) { | |
1657 | DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key); | |
1658 | return 0; | |
1659 | } | |
1660 | ||
1661 | return -EINVAL; | |
1662 | } | |
1663 | ||
1664 | static int mq_emit_config_values(struct dm_cache_policy *p, char *result, | |
86a3238c | 1665 | unsigned int maxlen, ssize_t *sz_ptr) |
9ed84698 JT |
1666 | { |
1667 | ssize_t sz = *sz_ptr; | |
1668 | ||
1669 | DMEMIT("10 random_threshold 0 " | |
1670 | "sequential_threshold 0 " | |
1671 | "discard_promote_adjustment 0 " | |
1672 | "read_promote_adjustment 0 " | |
1673 | "write_promote_adjustment 0 "); | |
1674 | ||
1675 | *sz_ptr = sz; | |
1676 | return 0; | |
1677 | } | |
1678 | ||
66a63635 | 1679 | /* Init the policy plugin interface function pointers. */ |
9ed84698 | 1680 | static void init_policy_functions(struct smq_policy *mq, bool mimic_mq) |
66a63635 JT |
1681 | { |
1682 | mq->policy.destroy = smq_destroy; | |
66a63635 | 1683 | mq->policy.lookup = smq_lookup; |
b29d4986 JT |
1684 | mq->policy.lookup_with_work = smq_lookup_with_work; |
1685 | mq->policy.get_background_work = smq_get_background_work; | |
1686 | mq->policy.complete_background_work = smq_complete_background_work; | |
66a63635 JT |
1687 | mq->policy.set_dirty = smq_set_dirty; |
1688 | mq->policy.clear_dirty = smq_clear_dirty; | |
1689 | mq->policy.load_mapping = smq_load_mapping; | |
b29d4986 | 1690 | mq->policy.invalidate_mapping = smq_invalidate_mapping; |
4e781b49 | 1691 | mq->policy.get_hint = smq_get_hint; |
66a63635 JT |
1692 | mq->policy.residency = smq_residency; |
1693 | mq->policy.tick = smq_tick; | |
b29d4986 | 1694 | mq->policy.allow_migrations = smq_allow_migrations; |
9ed84698 JT |
1695 | |
1696 | if (mimic_mq) { | |
1697 | mq->policy.set_config_value = mq_set_config_value; | |
1698 | mq->policy.emit_config_values = mq_emit_config_values; | |
1699 | } | |
66a63635 JT |
1700 | } |
1701 | ||
1702 | static bool too_many_hotspot_blocks(sector_t origin_size, | |
1703 | sector_t hotspot_block_size, | |
86a3238c | 1704 | unsigned int nr_hotspot_blocks) |
66a63635 JT |
1705 | { |
1706 | return (hotspot_block_size * nr_hotspot_blocks) > origin_size; | |
1707 | } | |
1708 | ||
1709 | static void calc_hotspot_params(sector_t origin_size, | |
1710 | sector_t cache_block_size, | |
86a3238c | 1711 | unsigned int nr_cache_blocks, |
66a63635 | 1712 | sector_t *hotspot_block_size, |
86a3238c | 1713 | unsigned int *nr_hotspot_blocks) |
66a63635 JT |
1714 | { |
1715 | *hotspot_block_size = cache_block_size * 16u; | |
1716 | *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u); | |
1717 | ||
1718 | while ((*hotspot_block_size > cache_block_size) && | |
1719 | too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks)) | |
1720 | *hotspot_block_size /= 2u; | |
1721 | } | |
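/*
 * Worked example with hypothetical sizes: a 32KiB cache block (64
 * sectors) and a 64GiB cache (2097152 blocks) give an initial hotspot
 * block of 16 * 64 = 1024 sectors and 2097152 / 4 = 524288 hotspot
 * blocks.  Against a 128GiB origin (268435456 sectors), 1024 * 524288 =
 * 536870912 exceeds the origin, so the block size halves once to 512
 * sectors (256KiB); 512 * 524288 no longer exceeds it, so the loop stops.
 */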
1722 | ||
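/*
 * Common constructor for the smq, mq and cleaner variants.  The entry
 * space is carved into writeback sentinels, demote sentinels, hotspot
 * entries and cache entries, in that order.
 */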
9ed84698 JT |
1723 | static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, |
1724 | sector_t origin_size, | |
1725 | sector_t cache_block_size, | |
b29d4986 JT |
1726 | bool mimic_mq, |
1727 | bool migrations_allowed) | |
66a63635 | 1728 | { |
86a3238c HM |
1729 | unsigned int i; |
1730 | unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; | |
1731 | unsigned int total_sentinels = 2u * nr_sentinels_per_queue; | |
66a63635 JT |
1732 | struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); |
1733 | ||
1734 | if (!mq) | |
1735 | return NULL; | |
1736 | ||
9ed84698 | 1737 | init_policy_functions(mq, mimic_mq); |
66a63635 JT |
1738 | mq->cache_size = cache_size; |
1739 | mq->cache_block_size = cache_block_size; | |
1740 | ||
1741 | calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size), | |
1742 | &mq->hotspot_block_size, &mq->nr_hotspot_blocks); | |
1743 | ||
1744 | mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); | |
1745 | mq->hotspot_level_jump = 1u; | |
1746 | if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { | |
1747 | DMERR("couldn't initialize entry space"); | |
1748 | goto bad_pool_init; | |
1749 | } | |
1750 | ||
1751 | init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); | |
b29d4986 | 1752 | for (i = 0; i < nr_sentinels_per_queue; i++) |
66a63635 JT |
1753 | get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; |
1754 | ||
1755 | init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); | |
b29d4986 | 1756 | for (i = 0; i < nr_sentinels_per_queue; i++) |
66a63635 JT |
1757 | get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; |
1758 | ||
1759 | init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, | |
1760 | total_sentinels + mq->nr_hotspot_blocks); | |
1761 | ||
1762 | init_allocator(&mq->cache_alloc, &mq->es, | |
1763 | total_sentinels + mq->nr_hotspot_blocks, | |
1764 | total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); | |
1765 | ||
1766 | mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); | |
1767 | if (!mq->hotspot_hit_bits) { | |
1768 | DMERR("couldn't allocate hotspot hit bitset"); | |
1769 | goto bad_hotspot_hit_bits; | |
1770 | } | |
1771 | clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); | |
1772 | ||
1773 | if (from_cblock(cache_size)) { | |
1774 | mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); | |
134bf30c | 1775 | if (!mq->cache_hit_bits) { |
66a63635 JT |
1776 | DMERR("couldn't allocate cache hit bitset"); |
1777 | goto bad_cache_hit_bits; | |
1778 | } | |
1779 | clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); | |
1780 | } else | |
1781 | mq->cache_hit_bits = NULL; | |
1782 | ||
66a63635 | 1783 | mq->tick = 0; |
4051aab7 | 1784 | spin_lock_init(&mq->lock); |
66a63635 JT |
1785 | |
1786 | q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); | |
1787 | mq->hotspot.nr_top_levels = 8; | |
1788 | mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, | |
1789 | from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); | |
1790 | ||
1791 | q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); | |
1792 | q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); | |
1793 | ||
1794 | stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); | |
1795 | stats_init(&mq->cache_stats, NR_CACHE_LEVELS); | |
1796 | ||
1797 | if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) | |
1798 | goto bad_alloc_table; | |
1799 | ||
1800 | if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) | |
1801 | goto bad_alloc_hotspot_table; | |
1802 | ||
1803 | sentinels_init(mq); | |
1804 | mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; | |
1805 | ||
1806 | mq->next_hotspot_period = jiffies; | |
1807 | mq->next_cache_period = jiffies; | |
1808 | ||
8ee18ede | 1809 | mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */ |
b29d4986 JT |
1810 | if (!mq->bg_work) |
1811 | goto bad_btracker; | |
1812 | ||
1813 | mq->migrations_allowed = migrations_allowed; | |
1814 | ||
66a63635 JT |
1815 | return &mq->policy; |
1816 | ||
b29d4986 JT |
1817 | bad_btracker: |
1818 | h_exit(&mq->hotspot_table); | |
66a63635 JT |
1819 | bad_alloc_hotspot_table: |
1820 | h_exit(&mq->table); | |
1821 | bad_alloc_table: | |
1822 | free_bitset(mq->cache_hit_bits); | |
1823 | bad_cache_hit_bits: | |
1824 | free_bitset(mq->hotspot_hit_bits); | |
1825 | bad_hotspot_hit_bits: | |
1826 | space_exit(&mq->es); | |
1827 | bad_pool_init: | |
1828 | kfree(mq); | |
1829 | ||
1830 | return NULL; | |
1831 | } | |
1832 | ||
9ed84698 JT |
1833 | static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, |
1834 | sector_t origin_size, | |
1835 | sector_t cache_block_size) | |
1836 | { | |
b29d4986 | 1837 | return __smq_create(cache_size, origin_size, cache_block_size, false, true); |
9ed84698 JT |
1838 | } |
1839 | ||
1840 | static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, | |
1841 | sector_t origin_size, | |
1842 | sector_t cache_block_size) | |
1843 | { | |
b29d4986 JT |
1844 | return __smq_create(cache_size, origin_size, cache_block_size, true, true); |
1845 | } | |
1846 | ||
1847 | static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size, | |
1848 | sector_t origin_size, | |
1849 | sector_t cache_block_size) | |
1850 | { | |
1851 | return __smq_create(cache_size, origin_size, cache_block_size, false, false); | |
9ed84698 JT |
1852 | } |
1853 | ||
66a63635 JT |
1854 | /*----------------------------------------------------------------*/ |
1855 | ||
1856 | static struct dm_cache_policy_type smq_policy_type = { | |
1857 | .name = "smq", | |
b29d4986 | 1858 | .version = {2, 0, 0}, |
66a63635 JT |
1859 | .hint_size = 4, |
1860 | .owner = THIS_MODULE, | |
1861 | .create = smq_create | |
1862 | }; | |
1863 | ||
9ed84698 JT |
1864 | static struct dm_cache_policy_type mq_policy_type = { |
1865 | .name = "mq", | |
b29d4986 | 1866 | .version = {2, 0, 0}, |
9ed84698 JT |
1867 | .hint_size = 4, |
1868 | .owner = THIS_MODULE, | |
1869 | .create = mq_create, | |
1870 | }; | |
1871 | ||
b29d4986 JT |
1872 | static struct dm_cache_policy_type cleaner_policy_type = { |
1873 | .name = "cleaner", | |
1874 | .version = {2, 0, 0}, | |
1875 | .hint_size = 4, | |
1876 | .owner = THIS_MODULE, | |
1877 | .create = cleaner_create, | |
1878 | }; | |
1879 | ||
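/* "default" is an alias for smq; ->real points at the policy it stands for. */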
bccab6a0 MS |
1880 | static struct dm_cache_policy_type default_policy_type = { |
1881 | .name = "default", | |
b29d4986 | 1882 | .version = {2, 0, 0}, |
bccab6a0 MS |
1883 | .hint_size = 4, |
1884 | .owner = THIS_MODULE, | |
1885 | .create = smq_create, | |
1886 | .real = &smq_policy_type | |
1887 | }; | |
1888 | ||
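/* Register the four policy names; any failure unwinds the earlier registrations. */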
66a63635 JT |
1889 | static int __init smq_init(void) |
1890 | { | |
1891 | int r; | |
1892 | ||
1893 | r = dm_cache_policy_register(&smq_policy_type); | |
1894 | if (r) { | |
1895 | DMERR("register failed %d", r); | |
1896 | return -ENOMEM; | |
1897 | } | |
1898 | ||
9ed84698 JT |
1899 | r = dm_cache_policy_register(&mq_policy_type); |
1900 | if (r) { | |
7dd85bb0 | 1901 | DMERR("register failed (as mq) %d", r); |
b29d4986 JT |
1902 | goto out_mq; |
1903 | } | |
1904 | ||
1905 | r = dm_cache_policy_register(&cleaner_policy_type); | |
1906 | if (r) { | |
1907 | DMERR("register failed (as cleaner) %d", r); | |
1908 | goto out_cleaner; | |
9ed84698 JT |
1909 | } |
1910 | ||
bccab6a0 MS |
1911 | r = dm_cache_policy_register(&default_policy_type); |
1912 | if (r) { | |
1913 | DMERR("register failed (as default) %d", r); | |
b29d4986 | 1914 | goto out_default; |
bccab6a0 MS |
1915 | } |
1916 | ||
66a63635 | 1917 | return 0; |
b29d4986 JT |
1918 | |
1919 | out_default: | |
1920 | dm_cache_policy_unregister(&cleaner_policy_type); | |
1921 | out_cleaner: | |
1922 | dm_cache_policy_unregister(&mq_policy_type); | |
1923 | out_mq: | |
1924 | dm_cache_policy_unregister(&smq_policy_type); | |
1925 | ||
1926 | return -ENOMEM; | |
66a63635 JT |
1927 | } |
1928 | ||
1929 | static void __exit smq_exit(void) | |
1930 | { | |
b29d4986 | 1931 | dm_cache_policy_unregister(&cleaner_policy_type); |
66a63635 | 1932 | dm_cache_policy_unregister(&smq_policy_type); |
9ed84698 | 1933 | dm_cache_policy_unregister(&mq_policy_type); |
bccab6a0 | 1934 | dm_cache_policy_unregister(&default_policy_type); |
66a63635 JT |
1935 | } |
1936 | ||
1937 | module_init(smq_init); | |
1938 | module_exit(smq_exit); | |
1939 | ||
1940 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | |
1941 | MODULE_LICENSE("GPL"); | |
1942 | MODULE_DESCRIPTION("smq cache policy"); | |
34dd0517 YZ |
1943 | |
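/* Aliases let the module autoload when a table requests one of the other policy names. */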
1944 | MODULE_ALIAS("dm-cache-default"); | |
9ed84698 | 1945 | MODULE_ALIAS("dm-cache-mq"); |
b29d4986 | 1946 | MODULE_ALIAS("dm-cache-cleaner"); |