/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == -EAGAIN)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

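/*
 * Example: the preallocate-then-allocate retry loop this API expects,
 * as a minimal sketch; "my_idr", "my_lock" and "obj" are hypothetical
 * caller-side names.
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, obj, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;	(-ENOSPC: id space exhausted)
 */
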
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_IDR_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idr_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

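/*
 * Example: the clean-up sequence described above, as a minimal sketch.
 * "my_idr" is a hypothetical handle and free_one() a hypothetical
 * callback; idr_for_each() is defined further down in this file.
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_destroy(&my_idr);
 */
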
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_IDR_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);

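/*
 * Example: a lookup under rcu_read_lock(), as a minimal sketch.  It
 * assumes the stored objects are freed only after an RCU grace period
 * (e.g. via kfree_rcu()); "my_idr" is a hypothetical handle.
 *
 *	rcu_read_lock();
 *	p = idr_find(&my_idr, id);
 *	if (p)
 *		... use p; it stays valid until rcu_read_unlock() ...
 *	rcu_read_unlock();
 */
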
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp.  On success, *@nextidp is
 * updated to that object's id, so incrementing it readies the next
 * iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = 1 << n;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);

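/*
 * Example: walking every entry with idr_get_next(), a minimal sketch
 * of the pattern wrapped by the idr_for_each_entry() macro in
 * <linux/idr.h>.  "my_idr" is a hypothetical handle.
 *
 *	int id = 0;
 *	void *p;
 *
 *	while ((p = idr_get_next(&my_idr, &id)) != NULL) {
 *		... operate on p, whose id is now in "id" ...
 *		id++;	(advance past the entry just returned)
 *	}
 */
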
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_IDR_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

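/*
 * Example: swapping the pointer stored at an existing id, as a minimal
 * sketch; "my_idr" and "new_ptr" are hypothetical names.  Note errors
 * come back as ERR_PTR() values, not NULL.
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(-EINVAL or -ENOENT)
 */
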
void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

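/*
 * Example: an idr handle can be set up at run time with idr_init() or,
 * for file-scope handles, statically with the DEFINE_IDR() macro from
 * <linux/idr.h>.  A minimal sketch:
 *
 *	static DEFINE_IDR(my_idr);
 *
 * or, for an embedded handle:
 *
 *	struct idr my_idr;
 *
 *	idr_init(&my_idr);
 */
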
/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling
 * ida_get_new_above().  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

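/*
 * Example: the same preallocate-then-allocate retry loop as for idr,
 * sketched for ida; "my_ida" and "my_lock" are hypothetical names.
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;	(-ENOSPC: id space exhausted)
 */
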
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0 for unlimited)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

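/*
 * Example: the simple interface handles preallocation, retry and
 * locking internally, so usage reduces to a pair of calls.  A minimal
 * sketch; "my_ida" is a hypothetical handle.
 *
 *	int id;
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;	(-ENOMEM or -ENOSPC)
 *	...
 *	ida_simple_remove(&my_ida, id);
 */
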
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);