Btrfs: rework ulist with list+rb_tree
fs/btrfs/ulist.c
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 * Distributed under the GNU GPL license version 2.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 *         for (all child nodes n in elem)
 *                 ulist_add(ulist, n);
 *         do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

/**
 * ulist_init - freshly initialize a ulist
 * @ulist: the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
        INIT_LIST_HEAD(&ulist->nodes);
        ulist->root = RB_ROOT;
        ulist->nnodes = 0;
}
EXPORT_SYMBOL(ulist_init);

/**
 * ulist_fini - free up additionally allocated memory for the ulist
 * @ulist: the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated; a usage sketch follows this function.
 */
void ulist_fini(struct ulist *ulist)
{
        struct ulist_node *node;
        struct ulist_node *next;

        list_for_each_entry_safe(node, next, &ulist->nodes, list) {
                kfree(node);
        }
        ulist->root = RB_ROOT;
        INIT_LIST_HEAD(&ulist->nodes);
}
EXPORT_SYMBOL(ulist_fini);
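
/*
 * Usage sketch (hypothetical, not taken from existing callers): a ulist
 * embedded on the stack only needs ulist_init()/ulist_fini(); ulist_free()
 * must not be called on it. The two values and the walk_extent() helper are
 * made up for illustration, and error handling of ulist_add() is omitted.
 *
 * struct ulist ul;
 * struct ulist_iterator uiter;
 * struct ulist_node *node;
 *
 * ulist_init(&ul);
 * ulist_add(&ul, 4096, 0, GFP_NOFS);
 * ulist_add(&ul, 8192, 0, GFP_NOFS);
 * ULIST_ITER_INIT(&uiter);
 * while ((node = ulist_next(&ul, &uiter)))
 *         walk_extent(node->val, node->aux);
 * ulist_fini(&ul);
 */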

/**
 * ulist_reinit - prepare a ulist for reuse
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
        ulist_fini(ulist);
        ulist_init(ulist);
}
EXPORT_SYMBOL(ulist_reinit);
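
/*
 * Usage sketch (hypothetical): reusing one ulist across several rounds of
 * work instead of freeing and reallocating it each time. collect_roots() and
 * process() stand in for any code that fills and consumes the ulist, rounds
 * is an arbitrary count, and error handling is omitted for brevity.
 *
 * struct ulist ul;
 * int i;
 *
 * ulist_init(&ul);
 * for (i = 0; i < rounds; i++) {
 *         collect_roots(&ul, i);
 *         process(&ul);
 *         ulist_reinit(&ul);
 * }
 * ulist_fini(&ul);
 */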

/**
 * ulist_alloc - dynamically allocate a ulist
 * @gfp_mask: allocation flags to use for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
        struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

        if (!ulist)
                return NULL;

        ulist_init(ulist);

        return ulist;
}
EXPORT_SYMBOL(ulist_alloc);

/**
 * ulist_free - free dynamically allocated ulist
 * @ulist: ulist to free
 *
 * It is not necessary to call ulist_fini before.
 */
void ulist_free(struct ulist *ulist)
{
        if (!ulist)
                return;
        ulist_fini(ulist);
        kfree(ulist);
}
EXPORT_SYMBOL(ulist_free);
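
/*
 * Usage sketch (hypothetical): the dynamically allocated counterpart to the
 * stack-based example above. Because ulist_free() accepts NULL, the error
 * path may free unconditionally. The bytenr value and GFP_NOFS are only
 * plausible placeholders.
 *
 * struct ulist *ul = ulist_alloc(GFP_NOFS);
 * int ret;
 *
 * if (!ul)
 *         return -ENOMEM;
 * ret = ulist_add(ul, bytenr, 0, GFP_NOFS);
 * if (ret < 0)
 *         goto out;
 * ret = 0;
 * out:
 * ulist_free(ul);
 * return ret;
 */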

/*
 * Find the node for @val in the rb_tree backing the ulist, or return NULL
 * if it is not present.
 */
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
        struct rb_node *n = ulist->root.rb_node;
        struct ulist_node *u = NULL;

        while (n) {
                u = rb_entry(n, struct ulist_node, rb_node);
                if (u->val < val)
                        n = n->rb_right;
                else if (u->val > val)
                        n = n->rb_left;
                else
                        return u;
        }
        return NULL;
}

/*
 * Insert @ins into the rb_tree, keyed by ins->val. Returns -EEXIST if a node
 * with the same value is already present.
 */
static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
        struct rb_node **p = &ulist->root.rb_node;
        struct rb_node *parent = NULL;
        struct ulist_node *cur = NULL;

        while (*p) {
                parent = *p;
                cur = rb_entry(parent, struct ulist_node, rb_node);

                if (cur->val < ins->val)
                        p = &(*p)->rb_right;
                else if (cur->val > ins->val)
                        p = &(*p)->rb_left;
                else
                        return -EEXIST;
        }
        rb_link_node(&ins->rb_node, parent, p);
        rb_insert_color(&ins->rb_node, &ulist->root);
        return 0;
}

/**
 * ulist_add - add an element to the ulist
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered. A usage sketch follows ulist_add_merge() below.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
        return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
                    u64 *old_aux, gfp_t gfp_mask)
{
        int ret;
        struct ulist_node *node;

        node = ulist_rbtree_search(ulist, val);
        if (node) {
                if (old_aux)
                        *old_aux = node->aux;
                return 0;
        }
        node = kmalloc(sizeof(*node), gfp_mask);
        if (!node)
                return -ENOMEM;

        node->val = val;
        node->aux = aux;
#ifdef CONFIG_BTRFS_DEBUG
        node->seqnum = ulist->nnodes;
#endif

        ret = ulist_rbtree_insert(ulist, node);
        ASSERT(!ret);
        list_add_tail(&node->list, &ulist->nodes);
        ulist->nnodes++;

        return 1;
}
EXPORT_SYMBOL(ulist_add);
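
/*
 * Usage sketch (hypothetical): telling the three return values of ulist_add()
 * apart, and using ulist_add_merge() to look at the aux value already stored
 * for an existing entry. ul, qgroupid, aux and the two helpers are made up,
 * and GFP_ATOMIC only stands in for a suitable mask. A negative return means
 * -ENOMEM with the ulist unchanged, 1 means newly inserted, 0 means the value
 * was already present.
 *
 * int ret;
 * u64 old_aux;
 *
 * ret = ulist_add(ul, qgroupid, aux, GFP_ATOMIC);
 * if (ret < 0)
 *         return ret;
 * if (ret == 1)
 *         account_new_member(qgroupid);
 *
 * ret = ulist_add_merge(ul, qgroupid, aux, &old_aux, GFP_ATOMIC);
 * if (ret == 0)
 *         merge_aux_values(old_aux, aux);
 */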

/**
 * ulist_next - iterate ulist
 * @ulist: ulist to iterate
 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 * locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
        struct ulist_node *node;

        if (list_empty(&ulist->nodes))
                return NULL;
        if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
                return NULL;
        if (uiter->cur_list) {
                uiter->cur_list = uiter->cur_list->next;
        } else {
                uiter->cur_list = ulist->nodes.next;
#ifdef CONFIG_BTRFS_DEBUG
                uiter->i = 0;
#endif
        }
        node = list_entry(uiter->cur_list, struct ulist_node, list);
#ifdef CONFIG_BTRFS_DEBUG
        ASSERT(node->seqnum == uiter->i);
        ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
        uiter->i++;
#endif
        return node;
}
EXPORT_SYMBOL(ulist_next);
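
/*
 * Usage sketch (hypothetical): a concrete version of the graph walk from the
 * comment at the top of this file. process_node() and next_child() are
 * made-up placeholders and start is an arbitrary u64 key. Since elements
 * added during an enumeration are guaranteed to show up in that enumeration,
 * nodes discovered along the way are visited as well.
 *
 * struct ulist *seen = ulist_alloc(GFP_NOFS);
 * struct ulist_iterator uiter;
 * struct ulist_node *node;
 * u64 child;
 *
 * if (!seen)
 *         return -ENOMEM;
 * ulist_add(seen, start, 0, GFP_NOFS);
 * ULIST_ITER_INIT(&uiter);
 * while ((node = ulist_next(seen, &uiter))) {
 *         process_node(node->val);
 *         while (next_child(node->val, &child))
 *                 ulist_add(seen, child, 0, GFP_NOFS);
 * }
 * ulist_free(seen);
 */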