/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever (we treat it as a (void *)), with that id.  You can pass
 * this id to a user for him to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most
 * of the memory is returned (we keep IDR_FREE_MAX free layers) in a
 * local pool so we don't need to go to the memory "store" during an
 * id allocate, just so you don't need to be too concerned about
 * locking and conflicts with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS * l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS * l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers * IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);
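
/*
 * Typical use of the two-stage allocation API above: idr_pre_get()
 * is called outside the caller's lock, idr_get_new() under it, and
 * -EAGAIN sends us back around.  A minimal sketch; my_idr, my_lock
 * and obj are placeholder names, not defined in this file.
 *
 *	int id, err;
 *
 * retry:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, obj, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto retry;
 *	else if (err)
 *		return err;
 */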

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        move_to_free_list(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /* We can drop a layer */
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                move_to_free_list(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                move_to_free_list(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
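
/*
 * Lookup sketch matching the serialization rule above: the same lock
 * that covers idr_get_new()/idr_remove() must cover idr_find().
 * my_idr, my_lock, struct my_obj and id are placeholder names.
 *
 *	struct my_obj *obj;
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!obj)
 *		return -ENOENT;
 */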

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
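
/*
 * A minimal callback sketch for the clean-up sequence described in
 * idr_remove_all() above; free_obj and my_idr are placeholder names.
 *
 *	static int free_obj(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_obj, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */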

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
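
/*
 * idr_replace() returns the old pointer or an ERR_PTR() value, so the
 * result must be checked with IS_ERR() rather than against NULL.  A
 * sketch with placeholder names my_idr, new_obj and id:
 *
 *	void *old = idr_replace(&my_idr, new_obj, id);
 *
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */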

static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC,
                                idr_cache_ctor);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
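
/*
 * The ida API follows the same pre-get/retry convention as idr.  A
 * minimal sketch with the placeholder name my_ida:
 *
 *	int id, err;
 *
 * retry:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	err = ida_get_new(&my_ida, &id);
 *	if (err == -EAGAIN)
 *		goto retry;
 *	else if (err)
 *		return err;
 */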

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);	/* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);