/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user to pass back at a later time.  You then pass that id to
 * this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers) in a local pool,
 * so we don't need to go to the memory "store" during an id allocate
 * and you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

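/*
 * A minimal sketch of the preallocation idiom described above; my_idr,
 * my_lock, my_ptr and my_id are hypothetical caller-side names, not
 * part of this file:
 *
 *	int my_id, err;
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, my_ptr, &my_id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 *	else if (err)
 *		return err;
 */
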
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

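/*
 * Worked example for the walk above, assuming IDR_BITS == 5 (32-bit
 * longs, so 32-slot layers): id 1234 decomposes into the per-layer
 * indices 1, 6 and 18, since 1*32*32 + 6*32 + 18 == 1234.  sub_alloc()
 * consumes one such index per layer, top down, and uses the inverted
 * bitmap to jump to the first free slot at each level.
 */
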
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

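/*
 * Growth illustration, again assuming IDR_BITS == 5: a one-layer tree
 * covers ids 0..31, so asking idr_get_empty_slot() for id 100 pushes a
 * second layer on top, extending coverage to 0..1023, before
 * sub_alloc() descends again.
 */
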
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			move_to_free_list(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/* We can drop a layer */
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		move_to_free_list(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				move_to_free_list(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

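/*
 * The clean-up sequence described above, as a sketch; my_idr and
 * free_fn are hypothetical caller-side names:
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */
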
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);

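/*
 * Lookup sketch under RCU; my_idr is a hypothetical handle, and the
 * object must not be freed until an RCU grace period has elapsed after
 * its id is removed:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 */
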
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

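/*
 * Callback sketch: counting the registered pointers; my_idr and
 * count_fn are hypothetical names:
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_fn, &count);
 */
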
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

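/*
 * Since idr_replace() returns either the old pointer or an ERR_PTR()
 * value, callers check the result with IS_ERR(); sketch with
 * hypothetical names:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */
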
static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC,
				idr_cache_ctor);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
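
/*
 * Definition sketch; my_idr is a hypothetical name, and DEFINE_IDR()
 * covers the static case:
 *
 *	static DEFINE_IDR(my_idr);
 *
 * or, for an embedded/dynamic handle:
 *
 *	struct idr my_idr;
 *	idr_init(&my_idr);
 */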

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with the full blown idr because each id
 * only occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

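/*
 * Mapping illustration, assuming IDA_BITMAP_BITS == 992 (32-bit longs,
 * 128-byte leaves): id 2000 lands in leaf idr_id = 2000 / 992 = 2 at
 * bit offset 2000 % 992 = 16.  ida_get_new_above() and ida_remove()
 * below both perform exactly this split.
 */
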
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

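/*
 * The ida preallocation idiom mirrors the idr one; sketch with
 * hypothetical my_ida and my_lock:
 *
 *	int id, err;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 */
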
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);