// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "messages.h"
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 * 		ulist_add(ulist, n);
 * 	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

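/*
 * A minimal, concrete sketch of the pattern above against the real API in
 * this file. It assumes a sleepable context where GFP_KERNEL is allowed;
 * 'root_bytenr' and the for_each_child() macro are illustrative stand-ins
 * for however the caller names the root and walks a node's children, not
 * part of the ulist API:
 *
 *	struct ulist *visited;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *elem;
 *	int ret;
 *
 *	visited = ulist_alloc(GFP_KERNEL);
 *	if (!visited)
 *		return -ENOMEM;
 *	ret = ulist_add(visited, root_bytenr, 0, GFP_KERNEL);
 *	if (ret < 0)
 *		goto out;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(visited, &uiter))) {
 *		u64 child;
 *
 *		for_each_child(elem->val, child) {
 *			ret = ulist_add(visited, child, 0, GFP_KERNEL);
 *			if (ret < 0)
 *				goto out;
 *		}
 *	}
 *	ret = 0;
 * out:
 *	ulist_free(visited);
 *	return ret;
 */
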
/*
 * Freshly initialize a ulist.
 *
 * @ulist:	the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/*
 * Free up additionally allocated memory for the ulist.
 *
 * @ulist:	the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}

/*
 * Prepare a ulist for reuse.
 *
 * @ulist:	ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

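/*
 * A short sketch of the lifetime the three helpers above are meant for: a
 * ulist embedded in a caller-owned structure or placed on the stack instead
 * of being allocated with ulist_alloc(). The values added here are purely
 * illustrative:
 *
 *	struct ulist seen;
 *	u64 i;
 *	int ret;
 *
 *	ulist_init(&seen);
 *	for (i = 0; i < 10; i++) {
 *		ret = ulist_add(&seen, i, 0, GFP_NOFS);
 *		if (ret < 0)
 *			break;
 *	}
 *	ulist_reinit(&seen);	// drop all elements, 'seen' stays usable
 *	ulist_release(&seen);	// frees the element nodes, not 'seen' itself
 */
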
/*
 * Dynamically allocate a ulist.
 *
 * @gfp_mask:	allocation flags to use for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/*
 * Free dynamically allocated ulist.
 *
 * @ulist:	ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

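/*
 * Sketch of the intended pairing with ulist_alloc(). Because ulist_free()
 * tolerates a NULL pointer and calls ulist_release() itself, one cleanup
 * label is enough even when allocation fails; 'tmp', the label name and the
 * value 42 are illustrative only:
 *
 *	struct ulist *tmp = NULL;
 *	int ret = -ENOMEM;
 *
 *	tmp = ulist_alloc(GFP_NOFS);
 *	if (!tmp)
 *		goto out;
 *	ret = ulist_add(tmp, 42, 0, GFP_NOFS);
 *	if (ret < 0)
 *		goto out;
 *	ret = 0;
 * out:
 *	ulist_free(tmp);	// safe even if 'tmp' is still NULL
 *	return ret;
 */
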
/* Find the node with value @val in the rbtree, or NULL if not present. */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

/* Unlink @node from both the rbtree and the node list, then free it. */
static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

/* Insert @ins keyed by ->val; return -EEXIST if the value already exists. */
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/*
 * Add an element to the ulist.
 *
 * @ulist:	ulist to add the element to
 * @val:	value to add to ulist
 * @aux:	auxiliary value to store along with val
 * @gfp_mask:	flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}

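/*
 * A small sketch of how a caller typically consumes the return values
 * documented above; 'logical' and 'new_aux' are illustrative names:
 *
 *	u64 old_aux;
 *	int ret;
 *
 *	ret = ulist_add_merge(ulist, logical, new_aux, &old_aux, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;	// -ENOMEM, the ulist is unchanged
 *	if (ret == 0) {
 *		// 'logical' was already present; 'new_aux' was ignored and
 *		// 'old_aux' now holds the previously stored aux value.
 *	} else {
 *		// ret == 1: 'logical' was inserted with 'new_aux' as aux.
 *	}
 */
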
/*
 * ulist_del - delete one node from ulist
 * @ulist:	ulist to remove node from
 * @val:	value to delete
 * @aux:	aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for successful delete.
 * Return > 0 for not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found, delete it */
	ulist_rbtree_erase(ulist, node);
	return 0;
}

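/*
 * Deletion sketch: ulist_del() only removes the node when both the value
 * and the stored aux match, so a mismatched aux leaves the node in place
 * and reports "not found"; the values here are illustrative:
 *
 *	ulist_add(ulist, 100, 5, GFP_NOFS);
 *	ulist_del(ulist, 100, 0);	// returns 1, aux mismatch, node kept
 *	ulist_del(ulist, 100, 5);	// returns 0, node removed and freed
 */
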
/*
 * Iterate ulist.
 *
 * @ulist:	ulist to iterate
 * @uiter:	iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 * locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}
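
/*
 * A minimal iteration sketch. The lock is whatever the caller already uses
 * to protect this ulist (read locking is sufficient, as noted above);
 * 'some_rwlock' is a stand-in for that caller-owned lock, not part of the
 * ulist API:
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	read_lock(&some_rwlock);
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ulist, &uiter)))
 *		pr_debug("val=%llu aux=%llu\n", node->val, node->aux);
 *	read_unlock(&some_rwlock);
 *
 * The order of the returned nodes is unspecified, so nothing here may rely
 * on seeing the values in insertion or ascending order.
 */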