// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"

/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned int safe_div(unsigned int n, unsigned int d)
{
	return d ? n / d : 0u;
}

static unsigned int safe_mod(unsigned int n, unsigned int d)
{
	return d ? n % d : 0u;
}

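/*
 * Example: safe_div(10, 4) == 2 and safe_mod(10, 4) == 2, while
 * safe_div(10, 0) == 0 and safe_mod(10, 0) == 0 rather than faulting.
 */
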
/*----------------------------------------------------------------*/

struct entry {
	unsigned int hash_next:28;
	unsigned int prev:28;
	unsigned int next:28;
	unsigned int level:6;
	bool dirty:1;
	bool allocated:1;
	bool sentinel:1;
	bool pending_work:1;

	dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)

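/*
 * Entries refer to each other by 28-bit indices rather than pointers,
 * which keeps struct entry small.  INDEXER_NULL ((1u << 28u) - 1u, i.e.
 * 268435455) is reserved as the "no entry" marker, playing the role a
 * NULL pointer would.
 */
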
/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static int space_init(struct entry_space *es, unsigned int nr_entries)
{
	if (!nr_entries) {
		es->begin = es->end = NULL;
		return 0;
	}

	es->begin = vzalloc(array_size(nr_entries, sizeof(struct entry)));
	if (!es->begin)
		return -ENOMEM;

	es->end = es->begin + nr_entries;
	return 0;
}

static void space_exit(struct entry_space *es)
{
	vfree(es->begin);
}

static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
	struct entry *e;

	e = es->begin + block;
	BUG_ON(e >= es->end);

	return e;
}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{
	BUG_ON(e < es->begin || e >= es->end);
	return e - es->begin;
}

static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
	if (block == INDEXER_NULL)
		return NULL;

	return __get_entry(es, block);
}

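/*
 * to_index() and to_entry() are inverses over a single entry_space:
 * to_index(es, __get_entry(es, i)) == i for any i < nr_entries, and
 * to_entry(es, INDEXER_NULL) maps the null marker back to NULL.
 */
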
/*----------------------------------------------------------------*/

struct ilist {
	unsigned int nr_elts;	/* excluding sentinel entries */
	unsigned int head, tail;
};

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}

static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
	return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
	return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
	return l->head == INDEXER_NULL;
}

static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = l_head(es, l);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *tail = l_tail(es, l);

	e->next = INDEXER_NULL;
	e->prev = l->tail;

	if (tail)
		tail->next = l->tail = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

static void l_add_before(struct entry_space *es, struct ilist *l,
			 struct entry *old, struct entry *e)
{
	struct entry *prev = l_prev(es, old);

	if (!prev)
		l_add_head(es, l, e);

	else {
		e->prev = old->prev;
		e->next = to_index(es, old);
		prev->next = old->prev = to_index(es, e);

		if (!e->sentinel)
			l->nr_elts++;
	}
}

static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = l_prev(es, e);
	struct entry *next = l_next(es, e);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}

static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_head(es, l); e; e = l_next(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
	struct entry *e;

	for (e = l_tail(es, l); e; e = l_prev(es, e))
		if (!e->sentinel) {
			l_del(es, l, e);
			return e;
		}

	return NULL;
}

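/*
 * An ilist is a doubly linked list threaded through the shared
 * entry_space by index.  Sentinel entries may sit on a list but are
 * never counted in nr_elts and are skipped by l_pop_head() and
 * l_pop_tail().
 */
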
/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom. This
 * structure is *much* better than a single lru list.
 */
#define MAX_LEVELS 64u

struct queue {
	struct entry_space *es;

	unsigned int nr_elts;
	unsigned int nr_levels;
	struct ilist qs[MAX_LEVELS];

	/*
	 * We maintain a count of the number of entries we would like in each
	 * level.
	 */
	unsigned int last_target_nr_elts;
	unsigned int nr_top_levels;
	unsigned int nr_in_top_levels;
	unsigned int target_count[MAX_LEVELS];
};

static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
	unsigned int i;

	q->es = es;
	q->nr_elts = 0;
	q->nr_levels = nr_levels;

	for (i = 0; i < q->nr_levels; i++) {
		l_init(q->qs + i);
		q->target_count[i] = 0u;
	}

	q->last_target_nr_elts = 0u;
	q->nr_top_levels = 0u;
	q->nr_in_top_levels = 0u;
}

static unsigned int q_size(struct queue *q)
{
	return q->nr_elts;
}

/*
 * Insert an entry to the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_tail(q->es, q->qs + e->level, e);
}

static void q_push_front(struct queue *q, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_head(q->es, q->qs + e->level, e);
}

static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
	BUG_ON(e->pending_work);

	if (!e->sentinel)
		q->nr_elts++;

	l_add_before(q->es, q->qs + e->level, old, e);
}

static void q_del(struct queue *q, struct entry *e)
{
	l_del(q->es, q->qs + e->level, e);
	if (!e->sentinel)
		q->nr_elts--;
}

/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
	unsigned int level;
	struct entry *e;

	max_level = min(max_level, q->nr_levels);

	for (level = 0; level < max_level; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
			if (e->sentinel) {
				if (can_cross_sentinel)
					continue;
				else
					break;
			}

			return e;
		}

	return NULL;
}

static struct entry *q_pop(struct queue *q)
{
	struct entry *e = q_peek(q, q->nr_levels, true);

	if (e)
		q_del(q, e);

	return e;
}

/*
 * This function assumes there is a non-sentinel entry to pop. It's only
 * used by redistribute, so we know this is true. It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
	struct entry *e;

	for (; level < q->nr_levels; level++)
		for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
			if (!e->sentinel) {
				l_del(q->es, q->qs + e->level, e);
				return e;
			}

	return NULL;
}

static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
				    unsigned int lbegin, unsigned int lend)
{
	unsigned int level, nr_levels, entries_per_level, remainder;

	BUG_ON(lbegin > lend);
	BUG_ON(lend > q->nr_levels);
	nr_levels = lend - lbegin;
	entries_per_level = safe_div(nr_elts, nr_levels);
	remainder = safe_mod(nr_elts, nr_levels);

	for (level = lbegin; level < lend; level++)
		q->target_count[level] =
			(level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}

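/*
 * Example: spreading nr_elts = 10 over the subrange [0, 4) gives
 * entries_per_level = 2 and remainder = 2, so the target counts for
 * levels 0..3 become 3, 3, 2, 2.
 */
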
/*
 * Typically we have fewer elements in the top few levels which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{
	if (q->last_target_nr_elts == q->nr_elts)
		return;

	q->last_target_nr_elts = q->nr_elts;

	if (q->nr_top_levels > q->nr_levels)
		q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

	else {
		q_set_targets_subrange_(q, q->nr_in_top_levels,
					q->nr_levels - q->nr_top_levels, q->nr_levels);

		if (q->nr_in_top_levels < q->nr_elts)
			q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
						0, q->nr_levels - q->nr_top_levels);
		else
			q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
	}
}

static void q_redistribute(struct queue *q)
{
	unsigned int target, level;
	struct ilist *l, *l_above;
	struct entry *e;

	q_set_targets(q);

	for (level = 0u; level < q->nr_levels - 1u; level++) {
		l = q->qs + level;
		target = q->target_count[level];

		/*
		 * Pull down some entries from the level above.
		 */
		while (l->nr_elts < target) {
			e = __redist_pop_from(q, level + 1u);
			if (!e) {
				/* bug in nr_elts */
				break;
			}

			e->level = level;
			l_add_tail(q->es, l, e);
		}

		/*
		 * Push some entries up.
		 */
		l_above = q->qs + level + 1u;
		while (l->nr_elts > target) {
			e = l_pop_tail(q->es, l);

			if (!e)
				/* bug in nr_elts */
				break;

			e->level = level + 1u;
			l_add_tail(q->es, l_above, e);
		}
	}
}

static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
		      struct entry *s1, struct entry *s2)
{
	struct entry *de;
	unsigned int sentinels_passed = 0;
	unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);

	/* try and find an entry to swap with */
	if (extra_levels && (e->level < q->nr_levels - 1u)) {
		for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
			sentinels_passed++;

		if (de) {
			q_del(q, de);
			de->level = e->level;
			if (s1) {
				switch (sentinels_passed) {
				case 0:
					q_push_before(q, s1, de);
					break;

				case 1:
					q_push_before(q, s2, de);
					break;

				default:
					q_push(q, de);
				}
			} else
				q_push(q, de);
		}
	}

	q_del(q, e);
	e->level = new_level;
	q_push(q, e);
}

/*----------------------------------------------------------------*/

#define FP_SHIFT 8
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))

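/*
 * These are unsigned fixed-point fractions with FP_SHIFT fractional
 * bits: 1.0 is represented by 256, so SIXTEENTH == 16 (0.0625) and
 * EIGHTH == 32 (0.125).
 */
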
struct stats {
	unsigned int hit_threshold;
	unsigned int hits;
	unsigned int misses;
};

enum performance {
	Q_POOR,
	Q_FAIR,
	Q_WELL
};

static void stats_init(struct stats *s, unsigned int nr_levels)
{
	s->hit_threshold = (nr_levels * 3u) / 4u;
	s->hits = 0u;
	s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
	s->hits = s->misses = 0u;
}

static void stats_level_accessed(struct stats *s, unsigned int level)
{
	if (level >= s->hit_threshold)
		s->hits++;
	else
		s->misses++;
}

static void stats_miss(struct stats *s)
{
	s->misses++;
}

/*
 * There are times when we don't have any confidence in the hotspot queue.
 * Such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or if an io load changes. We detect this by
 * seeing how often a lookup is in the top levels of the hotspot queue.
 */
static enum performance stats_assess(struct stats *s)
{
	unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

	if (confidence < SIXTEENTH)
		return Q_POOR;

	else if (confidence < EIGHTH)
		return Q_FAIR;

	else
		return Q_WELL;
}

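/*
 * Example: with 10 hits and 200 misses the confidence is
 * (10 << 8) / 210 ~= 12, below SIXTEENTH (16), so the queue is assessed
 * as Q_POOR.  With 30 hits and 200 misses it is ~33, which is at least
 * EIGHTH (32), so the assessment is Q_WELL.
 */
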
/*----------------------------------------------------------------*/

struct smq_hash_table {
	struct entry_space *es;
	unsigned long long hash_bits;
	unsigned int *buckets;
};

/*
 * All cache entries are stored in a chained hash table. To save space we
 * use indexing again, and only store indexes to the next entry.
 */
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
	unsigned int i, nr_buckets;

	ht->es = es;
	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
	ht->hash_bits = __ffs(nr_buckets);

	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
	if (!ht->buckets)
		return -ENOMEM;

	for (i = 0; i < nr_buckets; i++)
		ht->buckets[i] = INDEXER_NULL;

	return 0;
}

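/*
 * Example: for nr_entries == 1000 the table gets
 * roundup_pow_of_two(max(250, 16)) == 256 buckets, i.e. hash_bits == 8,
 * so on average roughly four entries chain off each bucket.
 */
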
static void h_exit(struct smq_hash_table *ht)
{
	vfree(ht->buckets);
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
	return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
	return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
	e->hash_next = ht->buckets[bucket];
	ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, h, e);
}

static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
				struct entry **prev)
{
	struct entry *e;

	*prev = NULL;
	for (e = h_head(ht, h); e; e = h_next(ht, e)) {
		if (e->oblock == oblock)
			return e;

		*prev = e;
	}

	return NULL;
}

static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
		       struct entry *e, struct entry *prev)
{
	if (prev)
		prev->hash_next = e->hash_next;
	else
		ht->buckets[h] = e->hash_next;
}

/*
 * Also moves each entry to the front of the bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
	struct entry *e, *prev;
	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);

	e = __h_lookup(ht, h, oblock, &prev);
	if (e && prev) {
		/*
		 * Move to the front because this entry is likely
		 * to be hit again.
		 */
		__h_unlink(ht, h, e, prev);
		__h_insert(ht, h, e);
	}

	return e;
}

static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *prev;

	/*
	 * The down side of using a singly linked list is we have to
	 * iterate the bucket to remove an item.
	 */
	e = __h_lookup(ht, h, e->oblock, &prev);
	if (e)
		__h_unlink(ht, h, e, prev);
}

/*----------------------------------------------------------------*/

struct entry_alloc {
	struct entry_space *es;
	unsigned int begin;

	unsigned int nr_allocated;
	struct ilist free;
};

static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
			   unsigned int begin, unsigned int end)
{
	unsigned int i;

	ea->es = es;
	ea->nr_allocated = 0u;
	ea->begin = begin;

	l_init(&ea->free);
	for (i = begin; i != end; i++)
		l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}

static void init_entry(struct entry *e)
{
	/*
	 * We can't memset because that would clear the hotspot and
	 * sentinel bits which remain constant.
	 */
	e->hash_next = INDEXER_NULL;
	e->next = INDEXER_NULL;
	e->prev = INDEXER_NULL;
	e->level = 0u;
	e->dirty = true;	/* FIXME: audit */
	e->allocated = true;
	e->sentinel = false;
	e->pending_work = false;
}

static struct entry *alloc_entry(struct entry_alloc *ea)
{
	struct entry *e;

	if (l_empty(&ea->free))
		return NULL;

	e = l_pop_head(ea->es, &ea->free);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
	struct entry *e = __get_entry(ea->es, ea->begin + i);

	BUG_ON(e->allocated);

	l_del(ea->es, &ea->free, e);
	init_entry(e);
	ea->nr_allocated++;

	return e;
}

static void free_entry(struct entry_alloc *ea, struct entry *e)
{
	BUG_ON(!ea->nr_allocated);
	BUG_ON(!e->allocated);

	ea->nr_allocated--;
	e->allocated = false;
	l_add_tail(ea->es, &ea->free, e);
}

static bool allocator_empty(struct entry_alloc *ea)
{
	return l_empty(&ea->free);
}

static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
	return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
	return __get_entry(ea->es, ea->begin + index);
}

/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)

struct smq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	spinlock_t lock;
	dm_cblock_t cache_size;
	sector_t cache_block_size;

	sector_t hotspot_block_size;
	unsigned int nr_hotspot_blocks;
	unsigned int cache_blocks_per_hotspot_block;
	unsigned int hotspot_level_jump;

	struct entry_space es;
	struct entry_alloc writeback_sentinel_alloc;
	struct entry_alloc demote_sentinel_alloc;
	struct entry_alloc hotspot_alloc;
	struct entry_alloc cache_alloc;

	unsigned long *hotspot_hit_bits;
	unsigned long *cache_hit_bits;

	/*
	 * We maintain three queues of entries. The cache proper,
	 * consisting of a clean and dirty queue, containing the currently
	 * active mappings. The hotspot queue uses a larger block size to
	 * track blocks that are being hit frequently and potential
	 * candidates for promotion to the cache.
	 */
	struct queue hotspot;
	struct queue clean;
	struct queue dirty;

	struct stats hotspot_stats;
	struct stats cache_stats;

	/*
	 * Keeps track of time, incremented by the core. We use this to
	 * avoid attributing multiple hits within the same tick.
	 */
	unsigned int tick;

	/*
	 * The hash tables allow us to quickly find an entry by origin
	 * block.
	 */
	struct smq_hash_table table;
	struct smq_hash_table hotspot_table;

	bool current_writeback_sentinels;
	unsigned long next_writeback_period;

	bool current_demote_sentinels;
	unsigned long next_demote_period;

	unsigned int write_promote_level;
	unsigned int read_promote_level;

	unsigned long next_hotspot_period;
	unsigned long next_cache_period;

	struct background_tracker *bg_work;

	bool migrations_allowed;
};

/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}

static void __update_writeback_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->dirty;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = writeback_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void __update_demote_sentinels(struct smq_policy *mq)
{
	unsigned int level;
	struct queue *q = &mq->clean;
	struct entry *sentinel;

	for (level = 0; level < q->nr_levels; level++) {
		sentinel = demote_sentinel(mq, level);
		q_del(q, sentinel);
		q_push(q, sentinel);
	}
}

static void update_sentinels(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_writeback_period)) {
		mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
		mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
		__update_writeback_sentinels(mq);
	}

	if (time_after(jiffies, mq->next_demote_period)) {
		mq->next_demote_period = jiffies + DEMOTE_PERIOD;
		mq->current_demote_sentinels = !mq->current_demote_sentinels;
		__update_demote_sentinels(mq);
	}
}

static void __sentinels_init(struct smq_policy *mq)
{
	unsigned int level;
	struct entry *sentinel;

	for (level = 0; level < NR_CACHE_LEVELS; level++) {
		sentinel = writeback_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->dirty, sentinel);

		sentinel = demote_sentinel(mq, level);
		sentinel->level = level;
		q_push(&mq->clean, sentinel);
	}
}

static void sentinels_init(struct smq_policy *mq)
{
	mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
	mq->next_demote_period = jiffies + DEMOTE_PERIOD;

	mq->current_writeback_sentinels = false;
	mq->current_demote_sentinels = false;
	__sentinels_init(mq);

	mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
	mq->current_demote_sentinels = !mq->current_demote_sentinels;
	__sentinels_init(mq);
}

/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{
	q_del(e->dirty ? &mq->dirty : &mq->clean, e);
}

static void push_queue(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push(&mq->dirty, e);
	else
		q_push(&mq->clean, e);
}

// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue(mq, e);
}

static void push_queue_front(struct smq_policy *mq, struct entry *e)
{
	if (e->dirty)
		q_push_front(&mq->dirty, e);
	else
		q_push_front(&mq->clean, e);
}

static void push_front(struct smq_policy *mq, struct entry *e)
{
	h_insert(&mq->table, e);
	if (!e->pending_work)
		push_queue_front(mq, e);
}

static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
	return to_cblock(get_index(&mq->cache_alloc, e));
}

static void requeue(struct smq_policy *mq, struct entry *e)
{
	/*
	 * Pending work has temporarily been taken out of the queues.
	 */
	if (e->pending_work)
		return;

	if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
		if (!e->dirty) {
			q_requeue(&mq->clean, e, 1u, NULL, NULL);
			return;
		}

		q_requeue(&mq->dirty, e, 1u,
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
			  get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
	}
}

static unsigned int default_promote_level(struct smq_policy *mq)
{
	/*
	 * The promote level depends on the current performance of the
	 * cache.
	 *
	 * If the cache is performing badly, then we can't afford
	 * to promote much without causing performance to drop below that
	 * of the origin device.
	 *
	 * If the cache is performing well, then we don't need to promote
	 * much. If it isn't broken, don't fix it.
	 *
	 * If the cache is middling then we promote more.
	 *
	 * This scheme reminds me of a graph of entropy vs probability of a
	 * binary variable.
	 */
	static const unsigned int table[] = {
		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
	};

	unsigned int hits = mq->cache_stats.hits;
	unsigned int misses = mq->cache_stats.misses;
	unsigned int index = safe_div(hits << 4u, hits + misses);
	return table[index];
}

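/*
 * Example: when hits and misses are equal the index is
 * (hits << 4) / (2 * hits) == 8, selecting table[8] == 7; a pure hit
 * stream gives index 16 and table[16] == 1, so a well performing cache
 * promotes very little.
 */
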
static void update_promote_levels(struct smq_policy *mq)
{
	/*
	 * If there are unused cache entries then we want to be really
	 * eager to promote.
	 */
	unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);

	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);

	/*
	 * If the hotspot queue is performing badly then we have little
	 * confidence that we know which blocks to promote. So we cut down
	 * the amount of promotions.
	 */
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		threshold_level /= 4u;
		break;

	case Q_FAIR:
		threshold_level /= 2u;
		break;

	case Q_WELL:
		break;
	}

	mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
	mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
}

/*
 * If the hotspot queue is performing badly, then we try and move entries
 * around more quickly.
 */
static void update_level_jump(struct smq_policy *mq)
{
	switch (stats_assess(&mq->hotspot_stats)) {
	case Q_POOR:
		mq->hotspot_level_jump = 4u;
		break;

	case Q_FAIR:
		mq->hotspot_level_jump = 2u;
		break;

	case Q_WELL:
		mq->hotspot_level_jump = 1u;
		break;
	}
}

static void end_hotspot_period(struct smq_policy *mq)
{
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
	update_promote_levels(mq);

	if (time_after(jiffies, mq->next_hotspot_period)) {
		update_level_jump(mq);
		q_redistribute(&mq->hotspot);
		stats_reset(&mq->hotspot_stats);
		mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
	}
}

static void end_cache_period(struct smq_policy *mq)
{
	if (time_after(jiffies, mq->next_cache_period)) {
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

		q_redistribute(&mq->dirty);
		q_redistribute(&mq->clean);
		stats_reset(&mq->cache_stats);

		mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
	}
}

/*----------------------------------------------------------------*/

/*
 * Targets are given as a percentage.
 */
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u

static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{
	return from_cblock(mq->cache_size) * p / 100u;
}

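/*
 * Example: with a cache of 10000 blocks, percent_to_target(mq, FREE_TARGET)
 * is 2500 blocks, the figure free_target_met() compares against the number
 * of free cache blocks plus queued demotions.
 */
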
static bool clean_target_met(struct smq_policy *mq, bool idle)
{
	/*
	 * Cache entries may not be populated. So we cannot rely on the
	 * size of the clean queue.
	 */
	if (idle) {
		/*
		 * We'd like to clean everything.
		 */
		return q_size(&mq->dirty) == 0u;
	}

	/*
	 * If we're busy we don't worry about cleaning at all.
	 */
	return true;
}

static bool free_target_met(struct smq_policy *mq)
{
	unsigned int nr_free;

	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
		percent_to_target(mq, FREE_TARGET);
}

/*----------------------------------------------------------------*/

static void mark_pending(struct smq_policy *mq, struct entry *e)
{
	BUG_ON(e->sentinel);
	BUG_ON(!e->allocated);
	BUG_ON(e->pending_work);
	e->pending_work = true;
}

static void clear_pending(struct smq_policy *mq, struct entry *e)
{
	BUG_ON(!e->pending_work);
	e->pending_work = false;
}

static void queue_writeback(struct smq_policy *mq, bool idle)
{
	int r;
	struct policy_work work;
	struct entry *e;

	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
	if (e) {
		mark_pending(mq, e);
		q_del(&mq->dirty, e);

		work.op = POLICY_WRITEBACK;
		work.oblock = e->oblock;
		work.cblock = infer_cblock(mq, e);

		r = btracker_queue(mq->bg_work, &work, NULL);
		if (r) {
			clear_pending(mq, e);
			q_push_front(&mq->dirty, e);
		}
	}
}

static void queue_demotion(struct smq_policy *mq)
{
	int r;
	struct policy_work work;
	struct entry *e;

	if (WARN_ON_ONCE(!mq->migrations_allowed))
		return;

	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
	if (!e) {
		if (!clean_target_met(mq, true))
			queue_writeback(mq, false);
		return;
	}

	mark_pending(mq, e);
	q_del(&mq->clean, e);

	work.op = POLICY_DEMOTE;
	work.oblock = e->oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, NULL);
	if (r) {
		clear_pending(mq, e);
		q_push_front(&mq->clean, e);
	}
}

static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
			    struct policy_work **workp)
{
	int r;
	struct entry *e;
	struct policy_work work;

	if (!mq->migrations_allowed)
		return;

	if (allocator_empty(&mq->cache_alloc)) {
		/*
		 * We always claim to be 'idle' to ensure some demotions happen
		 * with continuous loads.
		 */
		if (!free_target_met(mq))
			queue_demotion(mq);
		return;
	}

	if (btracker_promotion_already_present(mq->bg_work, oblock))
		return;

	/*
	 * We allocate the entry now to reserve the cblock. If the
	 * background work is aborted we must remember to free it.
	 */
	e = alloc_entry(&mq->cache_alloc);
	BUG_ON(!e);
	e->pending_work = true;
	work.op = POLICY_PROMOTE;
	work.oblock = oblock;
	work.cblock = infer_cblock(mq, e);
	r = btracker_queue(mq->bg_work, &work, workp);
	if (r)
		free_entry(&mq->cache_alloc, e);
}

/*----------------------------------------------------------------*/

enum promote_result {
	PROMOTE_NOT,
	PROMOTE_TEMPORARY,
	PROMOTE_PERMANENT
};

/*
 * Converts a boolean into a promote result.
 */
static enum promote_result maybe_promote(bool promote)
{
	return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
}

static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
					  int data_dir, bool fast_promote)
{
	if (data_dir == WRITE) {
		if (!allocator_empty(&mq->cache_alloc) && fast_promote)
			return PROMOTE_TEMPORARY;

		return maybe_promote(hs_e->level >= mq->write_promote_level);
	} else
		return maybe_promote(hs_e->level >= mq->read_promote_level);
}

static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{
	sector_t r = from_oblock(b);
	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
	return to_oblock(r);
}

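/*
 * to_hblock() maps an origin block to the hotspot block that contains it
 * by dividing by cache_blocks_per_hotspot_block (16 by default, fewer if
 * the hotspot block size was halved for a small origin), so neighbouring
 * origin blocks share a hotspot entry.
 */
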
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
	unsigned int hi;
	dm_oblock_t hb = to_hblock(mq, b);
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

	if (e) {
		stats_level_accessed(&mq->hotspot_stats, e->level);

		hi = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump,
			  NULL, NULL);

	} else {
		stats_miss(&mq->hotspot_stats);

		e = alloc_entry(&mq->hotspot_alloc);
		if (!e) {
			e = q_pop(&mq->hotspot);
			if (e) {
				h_remove(&mq->hotspot_table, e);
				hi = get_index(&mq->hotspot_alloc, e);
				clear_bit(hi, mq->hotspot_hit_bits);
			}
		}

		if (e) {
			e->oblock = hb;
			q_push(&mq->hotspot, e);
			h_insert(&mq->hotspot_table, e);
		}
	}

	return e;
}

/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct. See dm-cache-policy.h for a
 * description of these.
 */

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct smq_policy, policy);
}

static void smq_destroy(struct dm_cache_policy *p)
{
	struct smq_policy *mq = to_smq_policy(p);

	btracker_destroy(mq->bg_work);
	h_exit(&mq->hotspot_table);
	h_exit(&mq->table);
	free_bitset(mq->hotspot_hit_bits);
	free_bitset(mq->cache_hit_bits);
	space_exit(&mq->es);
	kfree(mq);
}

/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
		    int data_dir, bool fast_copy,
		    struct policy_work **work, bool *background_work)
{
	struct entry *e, *hs_e;
	enum promote_result pr;

	*background_work = false;

	e = h_lookup(&mq->table, oblock);
	if (e) {
		stats_level_accessed(&mq->cache_stats, e->level);

		requeue(mq, e);
		*cblock = infer_cblock(mq, e);
		return 0;

	} else {
		stats_miss(&mq->cache_stats);

		/*
		 * The hotspot queue only gets updated with misses.
		 */
		hs_e = update_hotspot_queue(mq, oblock);

		pr = should_promote(mq, hs_e, data_dir, fast_copy);
		if (pr != PROMOTE_NOT) {
			queue_promotion(mq, oblock, work);
			*background_work = true;
		}

		return -ENOENT;
	}
}

static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy,
		      bool *background_work)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock,
		     data_dir, fast_copy,
		     NULL, background_work);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static int smq_lookup_with_work(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work)
{
	int r;
	bool background_queued;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result)
{
	int r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = btracker_issue(mq->bg_work, result);
	if (r == -ENODATA) {
		if (!clean_target_met(mq, idle)) {
			queue_writeback(mq, idle);
			r = btracker_issue(mq->bg_work, result);
		}
	}
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

/*
 * We need to clear any pending work flags that have been set, and in the
 * case of promotion free the entry for the destination cblock.
 */
static void __complete_background_work(struct smq_policy *mq,
				       struct policy_work *work,
				       bool success)
{
	struct entry *e = get_entry(&mq->cache_alloc,
				    from_cblock(work->cblock));

	switch (work->op) {
	case POLICY_PROMOTE:
		// !h, !q, a
		clear_pending(mq, e);
		if (success) {
			e->oblock = work->oblock;
			e->level = NR_CACHE_LEVELS - 1;
			push(mq, e);
			// h, q, a
		} else {
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		}
		break;

	case POLICY_DEMOTE:
		// h, !q, a
		if (success) {
			h_remove(&mq->table, e);
			free_entry(&mq->cache_alloc, e);
			// !h, !q, !a
		} else {
			clear_pending(mq, e);
			push_queue(mq, e);
			// h, q, a
		}
		break;

	case POLICY_WRITEBACK:
		// h, !q, a
		clear_pending(mq, e);
		push_queue(mq, e);
		// h, q, a
		break;
	}

	btracker_complete(mq->bg_work, work);
}

static void smq_complete_background_work(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__complete_background_work(mq, work, success);
	spin_unlock_irqrestore(&mq->lock, flags);
}

// in_hash(oblock) -> in_hash(oblock)
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
{
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (e->pending_work)
		e->dirty = set;
	else {
		del_queue(mq, e);
		e->dirty = set;
		push_queue(mq, e);
	}
}

static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, true);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__smq_set_clear_dirty(mq, cblock, false);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static unsigned int random_level(dm_cblock_t cblock)
{
	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}

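/*
 * hash_32(..., 9) yields a value in [0, 512); masking with
 * NR_CACHE_LEVELS - 1 (63) leaves a pseudo-random level in [0, 64), used
 * when a loaded mapping carries no valid hint.
 */
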
static int smq_load_mapping(struct dm_cache_policy *p,
			    dm_oblock_t oblock, dm_cblock_t cblock,
			    bool dirty, uint32_t hint, bool hint_valid)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
	e->oblock = oblock;
	e->dirty = dirty;
	e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
	e->pending_work = false;

	/*
	 * When we load mappings we push ahead of both sentinels in order to
	 * allow demotions and cleaning to occur immediately.
	 */
	push_front(mq, e);

	return 0;
}

static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return -ENODATA;

	// FIXME: what if this block has pending background work?
	del_queue(mq, e);
	h_remove(&mq->table, e);
	free_entry(&mq->cache_alloc, e);
	return 0;
}

static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	struct smq_policy *mq = to_smq_policy(p);
	struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

	if (!e->allocated)
		return 0;

	return e->level;
}

static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	unsigned long flags;
	struct smq_policy *mq = to_smq_policy(p);

	spin_lock_irqsave(&mq->lock, flags);
	r = to_cblock(mq->cache_alloc.nr_allocated);
	spin_unlock_irqrestore(&mq->lock, flags);

	return r;
}

static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct smq_policy *mq = to_smq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	mq->tick++;
	update_sentinels(mq);
	end_hotspot_period(mq);
	end_cache_period(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
	struct smq_policy *mq = to_smq_policy(p);

	mq->migrations_allowed = allow;
}

/*
 * smq has no config values, but the old mq policy did. To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold") ||
	    !strcasecmp(key, "sequential_threshold") ||
	    !strcasecmp(key, "discard_promote_adjustment") ||
	    !strcasecmp(key, "read_promote_adjustment") ||
	    !strcasecmp(key, "write_promote_adjustment")) {
		DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
		return 0;
	}

	return -EINVAL;
}

static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;

	DMEMIT("10 random_threshold 0 "
	       "sequential_threshold 0 "
	       "discard_promote_adjustment 0 "
	       "read_promote_adjustment 0 "
	       "write_promote_adjustment 0 ");

	*sz_ptr = sz;
	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
	mq->policy.destroy = smq_destroy;
	mq->policy.lookup = smq_lookup;
	mq->policy.lookup_with_work = smq_lookup_with_work;
	mq->policy.get_background_work = smq_get_background_work;
	mq->policy.complete_background_work = smq_complete_background_work;
	mq->policy.set_dirty = smq_set_dirty;
	mq->policy.clear_dirty = smq_clear_dirty;
	mq->policy.load_mapping = smq_load_mapping;
	mq->policy.invalidate_mapping = smq_invalidate_mapping;
	mq->policy.get_hint = smq_get_hint;
	mq->policy.residency = smq_residency;
	mq->policy.tick = smq_tick;
	mq->policy.allow_migrations = smq_allow_migrations;

	if (mimic_mq) {
		mq->policy.set_config_value = mq_set_config_value;
		mq->policy.emit_config_values = mq_emit_config_values;
	}
}

static bool too_many_hotspot_blocks(sector_t origin_size,
				    sector_t hotspot_block_size,
				    unsigned int nr_hotspot_blocks)
{
	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}

static void calc_hotspot_params(sector_t origin_size,
				sector_t cache_block_size,
				unsigned int nr_cache_blocks,
				sector_t *hotspot_block_size,
				unsigned int *nr_hotspot_blocks)
{
	*hotspot_block_size = cache_block_size * 16u;
	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);

	while ((*hotspot_block_size > cache_block_size) &&
	       too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
		*hotspot_block_size /= 2u;
}

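/*
 * Example: for 100000 cache blocks the hotspot queue tracks
 * max(100000 / 4, 1024) == 25000 hotspot blocks, each initially covering
 * 16 cache blocks' worth of origin; the hotspot block size is then halved
 * until those 25000 blocks fit within the origin device.
 */
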
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
					    sector_t origin_size,
					    sector_t cache_block_size,
					    bool mimic_mq,
					    bool migrations_allowed)
{
	unsigned int i;
	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
	unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq, mimic_mq);
	mq->cache_size = cache_size;
	mq->cache_block_size = cache_block_size;

	calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
			    &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

	mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
	mq->hotspot_level_jump = 1u;
	if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
		DMERR("couldn't initialize entry space");
		goto bad_pool_init;
	}

	init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
	for (i = 0; i < nr_sentinels_per_queue; i++)
		get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

	init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
		       total_sentinels + mq->nr_hotspot_blocks);

	init_allocator(&mq->cache_alloc, &mq->es,
		       total_sentinels + mq->nr_hotspot_blocks,
		       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));

	mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
	if (!mq->hotspot_hit_bits) {
		DMERR("couldn't allocate hotspot hit bitset");
		goto bad_hotspot_hit_bits;
	}
	clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

	if (from_cblock(cache_size)) {
		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
		if (!mq->cache_hit_bits) {
			DMERR("couldn't allocate cache hit bitset");
			goto bad_cache_hit_bits;
		}
		clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
	} else
		mq->cache_hit_bits = NULL;

	mq->tick = 0;
	spin_lock_init(&mq->lock);

	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
	mq->hotspot.nr_top_levels = 8;
	mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
					   from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

	q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
	q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

	stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
	stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

	if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
		goto bad_alloc_table;

	if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
		goto bad_alloc_hotspot_table;

	sentinels_init(mq);
	mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

	mq->next_hotspot_period = jiffies;
	mq->next_cache_period = jiffies;

	mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
	if (!mq->bg_work)
		goto bad_btracker;

	mq->migrations_allowed = migrations_allowed;

	return &mq->policy;

bad_btracker:
	h_exit(&mq->hotspot_table);
bad_alloc_hotspot_table:
	h_exit(&mq->table);
bad_alloc_table:
	free_bitset(mq->cache_hit_bits);
bad_cache_hit_bits:
	free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
	space_exit(&mq->es);
bad_pool_init:
	kfree(mq);

	return NULL;
}

static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {
	.name = "smq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create
};

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
};

static struct dm_cache_policy_type cleaner_policy_type = {
	.name = "cleaner",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = cleaner_create,
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {2, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = smq_create,
	.real = &smq_policy_type
};

static int __init smq_init(void)
{
	int r;

	r = dm_cache_policy_register(&smq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		return -ENOMEM;
	}

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed (as mq) %d", r);
		goto out_mq;
	}

	r = dm_cache_policy_register(&cleaner_policy_type);
	if (r) {
		DMERR("register failed (as cleaner) %d", r);
		goto out_cleaner;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (r) {
		DMERR("register failed (as default) %d", r);
		goto out_default;
	}

	return 0;

out_default:
	dm_cache_policy_unregister(&cleaner_policy_type);
out_cleaner:
	dm_cache_policy_unregister(&mq_policy_type);
out_mq:
	dm_cache_policy_unregister(&smq_policy_type);

	return -ENOMEM;
}

static void __exit smq_exit(void)
{
	dm_cache_policy_unregister(&cleaner_policy_type);
	dm_cache_policy_unregister(&smq_policy_type);
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);
}

module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");