Commit | Line | Data |
---|---|---|
3bd94003 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
b29d4986 JT |
2 | /* |
3 | * Copyright (C) 2017 Red Hat. All rights reserved. | |
4 | * | |
5 | * This file is released under the GPL. | |
6 | */ | |
7 | ||
8 | #include "dm-cache-background-tracker.h" | |
9 | ||
10 | /*----------------------------------------------------------------*/ | |
11 | ||
12 | #define DM_MSG_PREFIX "dm-background-tracker" | |
13 | ||
/*
 * A single piece of tracked background work.  Lives on exactly one of the
 * tracker's queued/issued lists and is keyed in the pending rbtree by
 * work.oblock.
 */
struct bt_work {
	struct list_head list;
	struct rb_node node;
	struct policy_work work;
};
19 | ||
struct background_tracker {
	/* Cap on the total number of work items tracked at once. */
	unsigned int max_work;

	/* Per-op counts of tracked work; adjusted in update_stats(). */
	atomic_t pending_promotes;
	atomic_t pending_writebacks;
	atomic_t pending_demotes;

	/* Work handed out to the caller but not yet completed. */
	struct list_head issued;
	/* Work accepted via btracker_queue() but not yet issued. */
	struct list_head queued;
	/* All tracked work, keyed by origin block (see __insert_pending()). */
	struct rb_root pending;

	/* Slab cache backing struct bt_work allocations. */
	struct kmem_cache *work_cache;
};
32 | ||
86a3238c | 33 | struct background_tracker *btracker_create(unsigned int max_work) |
b29d4986 JT |
34 | { |
35 | struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); | |
36 | ||
7e1b9521 CIK |
37 | if (!b) { |
38 | DMERR("couldn't create background_tracker"); | |
39 | return NULL; | |
40 | } | |
41 | ||
b29d4986 JT |
42 | b->max_work = max_work; |
43 | atomic_set(&b->pending_promotes, 0); | |
44 | atomic_set(&b->pending_writebacks, 0); | |
45 | atomic_set(&b->pending_demotes, 0); | |
46 | ||
47 | INIT_LIST_HEAD(&b->issued); | |
48 | INIT_LIST_HEAD(&b->queued); | |
49 | ||
50 | b->pending = RB_ROOT; | |
51 | b->work_cache = KMEM_CACHE(bt_work, 0); | |
52 | if (!b->work_cache) { | |
53 | DMERR("couldn't create mempool for background work items"); | |
54 | kfree(b); | |
55 | b = NULL; | |
56 | } | |
57 | ||
58 | return b; | |
59 | } | |
60 | EXPORT_SYMBOL_GPL(btracker_create); | |
61 | ||
62 | void btracker_destroy(struct background_tracker *b) | |
63 | { | |
95ab80a8 JT |
64 | struct bt_work *w, *tmp; |
65 | ||
66 | BUG_ON(!list_empty(&b->issued)); | |
67 | list_for_each_entry_safe (w, tmp, &b->queued, list) { | |
68 | list_del(&w->list); | |
69 | kmem_cache_free(b->work_cache, w); | |
70 | } | |
71 | ||
b29d4986 JT |
72 | kmem_cache_destroy(b->work_cache); |
73 | kfree(b); | |
74 | } | |
75 | EXPORT_SYMBOL_GPL(btracker_destroy); | |
76 | ||
77 | static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs) | |
78 | { | |
79 | if (from_oblock(lhs) < from_oblock(rhs)) | |
80 | return -1; | |
81 | ||
82 | if (from_oblock(rhs) < from_oblock(lhs)) | |
83 | return 1; | |
84 | ||
85 | return 0; | |
86 | } | |
87 | ||
/*
 * Insert @nw into the pending rbtree, keyed by its origin block.
 *
 * Returns true on success, false if work for the same oblock is already
 * tracked — in that case the tree is untouched and the caller still owns
 * @nw.
 *
 * NOTE: cmp_oblock() is called with the existing node first and the new
 * work second, and cmp < 0 descends left.  That orientation is unusual
 * but harmless as long as __find_pending() uses the identical
 * convention — keep the two in sync.
 */
static bool __insert_pending(struct background_tracker *b,
			     struct bt_work *nw)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node, *parent = NULL;

	/* Standard rbtree descent to locate the link point. */
	while (*new) {
		w = container_of(*new, struct bt_work, node);

		parent = *new;
		cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			/* already present */
			return false;
	}

	rb_link_node(&nw->node, parent, new);
	rb_insert_color(&nw->node, &b->pending);

	return true;
}
116 | ||
/*
 * Look up the tracked work for @oblock in the pending rbtree.
 *
 * Returns the matching bt_work, or NULL if none is tracked.
 *
 * NOTE: must use the same comparison orientation as __insert_pending()
 * (existing node as the first cmp_oblock() argument, cmp < 0 goes left).
 */
static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	int cmp;
	struct bt_work *w;
	struct rb_node **new = &b->pending.rb_node;

	while (*new) {
		w = container_of(*new, struct bt_work, node);

		cmp = cmp_oblock(w->work.oblock, oblock);
		if (cmp < 0)
			new = &((*new)->rb_left);

		else if (cmp > 0)
			new = &((*new)->rb_right);

		else
			break;
	}

	/* *new is non-NULL only if the loop exited on a match. */
	return *new ? w : NULL;
}
140 | ||
141 | ||
142 | static void update_stats(struct background_tracker *b, struct policy_work *w, int delta) | |
143 | { | |
144 | switch (w->op) { | |
145 | case POLICY_PROMOTE: | |
146 | atomic_add(delta, &b->pending_promotes); | |
147 | break; | |
148 | ||
149 | case POLICY_DEMOTE: | |
150 | atomic_add(delta, &b->pending_demotes); | |
151 | break; | |
152 | ||
153 | case POLICY_WRITEBACK: | |
154 | atomic_add(delta, &b->pending_writebacks); | |
155 | break; | |
156 | } | |
157 | } | |
158 | ||
/*
 * Number of writeback work items currently tracked (queued or issued).
 */
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
164 | ||
/*
 * Number of demotion work items currently tracked (queued or issued).
 */
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
	return atomic_read(&b->pending_demotes);
}
EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
170 | ||
171 | static bool max_work_reached(struct background_tracker *b) | |
172 | { | |
64748b16 JT |
173 | return atomic_read(&b->pending_promotes) + |
174 | atomic_read(&b->pending_writebacks) + | |
175 | atomic_read(&b->pending_demotes) >= b->max_work; | |
176 | } | |
177 | ||
280884fa | 178 | static struct bt_work *alloc_work(struct background_tracker *b) |
64748b16 JT |
179 | { |
180 | if (max_work_reached(b)) | |
181 | return NULL; | |
182 | ||
183 | return kmem_cache_alloc(b->work_cache, GFP_NOWAIT); | |
b29d4986 JT |
184 | } |
185 | ||
/*
 * Queue a piece of background work.
 *
 * A private copy of @work is taken and keyed in the pending rbtree by
 * its oblock.  If @pwork is non-NULL the work is treated as issued
 * immediately and *pwork is pointed at the tracker-owned copy;
 * otherwise it sits on the queued list until btracker_issue().
 *
 * Returns 0 on success, -ENOMEM if the work limit was reached or the
 * allocation failed, -EINVAL if work for the same oblock is already
 * tracked (the new request is dropped).  Callers depend on these exact
 * error codes — do not change them casually.
 */
int btracker_queue(struct background_tracker *b,
		   struct policy_work *work,
		   struct policy_work **pwork)
{
	struct bt_work *w;

	if (pwork)
		*pwork = NULL;

	w = alloc_work(b);
	if (!w)
		return -ENOMEM;

	memcpy(&w->work, work, sizeof(*work));

	if (!__insert_pending(b, w)) {
		/*
		 * There was a race, we'll just ignore this second
		 * bit of work for the same oblock.
		 */
		kmem_cache_free(b->work_cache, w);
		return -EINVAL;
	}

	if (pwork) {
		*pwork = &w->work;
		list_add(&w->list, &b->issued);
	} else
		list_add(&w->list, &b->queued);
	update_stats(b, &w->work, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(btracker_queue);
220 | ||
221 | /* | |
222 | * Returns -ENODATA if there's no work. | |
223 | */ | |
224 | int btracker_issue(struct background_tracker *b, struct policy_work **work) | |
225 | { | |
226 | struct bt_work *w; | |
227 | ||
228 | if (list_empty(&b->queued)) | |
229 | return -ENODATA; | |
230 | ||
231 | w = list_first_entry(&b->queued, struct bt_work, list); | |
232 | list_move(&w->list, &b->issued); | |
233 | *work = &w->work; | |
234 | ||
235 | return 0; | |
236 | } | |
237 | EXPORT_SYMBOL_GPL(btracker_issue); | |
238 | ||
239 | void btracker_complete(struct background_tracker *b, | |
240 | struct policy_work *op) | |
241 | { | |
242 | struct bt_work *w = container_of(op, struct bt_work, work); | |
243 | ||
244 | update_stats(b, &w->work, -1); | |
245 | rb_erase(&w->node, &b->pending); | |
246 | list_del(&w->list); | |
247 | kmem_cache_free(b->work_cache, w); | |
248 | } | |
249 | EXPORT_SYMBOL_GPL(btracker_complete); | |
250 | ||
251 | bool btracker_promotion_already_present(struct background_tracker *b, | |
252 | dm_oblock_t oblock) | |
253 | { | |
254 | return __find_pending(b, oblock) != NULL; | |
255 | } | |
256 | EXPORT_SYMBOL_GPL(btracker_promotion_already_present); | |
257 | ||
258 | /*----------------------------------------------------------------*/ |