#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
static struct kmem_cache *idr_layer_cache;
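+/*
+ * Per-cpu stack of preloaded idr_layers, chained through ->ary[0].
+ * idr_preload() fills it up to MAX_IDR_FREE entries and
+ * idr_layer_alloc() pops from it with preemption disabled.
+ */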
+static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
static struct idr_layer *get_from_free_list(struct idr *idp)
return(p);
}
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
+ * an idr_layer from @layer_idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with the per-idr preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+ struct idr_layer *new;
+
+ /* this is the old path, bypass to get_from_free_list() */
+ if (layer_idr)
+ return get_from_free_list(layer_idr);
+
+ /* try to allocate directly from kmem_cache */
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ if (new)
+ return new;
+
+ /*
+ * Try to fetch one from the per-cpu preload buffer if in process
+ * context. See idr_preload() for details.
+ */
+ if (in_interrupt())
+ return NULL;
+
+ preempt_disable();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+ preempt_enable();
+ return new;
+}
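+
+/*
+ * For illustration, popping from a preload buffer which holds two
+ * layers A and B:
+ *
+ *	before:	idr_preload_head -> A -> B -> NULL	idr_preload_cnt == 2
+ *	after:	idr_preload_head -> B -> NULL		idr_preload_cnt == 1
+ *
+ * and A is returned with ->ary[0] cleared.  Preemption stays disabled
+ * across the pop so the head and count stay consistent on this CPU.
+ */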
+
static void idr_layer_rcu_free(struct rcu_head *head)
{
struct idr_layer *layer;
}
EXPORT_SYMBOL(idr_pre_get);
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth. Returns
+ *
+ * the allocated id >= 0 if successful,
+ * -EAGAIN if the tree needs to grow for allocation to succeed,
+ * -ENOSPC if the id space is exhausted,
+ * -ENOMEM if more idr_layers need to be allocated.
+ */
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+ gfp_t gfp_mask, struct idr *layer_idr)
{
int n, m, sh;
struct idr_layer *p, *new;
/* if already at the top layer, we need to grow */
if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
- return IDR_NEED_TO_GROW;
+ return -EAGAIN;
}
p = pa[l];
BUG_ON(!p);
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_IDR_BIT) || (id < 0))
- return IDR_NOMORE_SPACE;
+ return -ENOSPC;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- new = get_from_free_list(idp);
+ new = idr_layer_alloc(gfp_mask, layer_idr);
if (!new)
- return -1;
+ return -ENOMEM;
new->layer = l-1;
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
- struct idr_layer **pa)
+ struct idr_layer **pa, gfp_t gfp_mask,
+ struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = get_from_free_list(idp)))
- return -1;
+ if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
+ return -ENOMEM;
p->layer = 0;
layers = 1;
}
p->layer++;
continue;
}
- if (!(new = get_from_free_list(idp))) {
+ if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
- return -1;
+ return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
}
rcu_assign_pointer(idp->top, p);
idp->layers = layers;
- v = sub_alloc(idp, &id, pa);
- if (v == IDR_NEED_TO_GROW)
+ v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
+ if (v == -EAGAIN)
goto build_up;
return(v);
}
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+/*
+ * @id and @pa are from a successful allocation from idr_get_empty_slot().
+ * Install the user pointer @ptr and mark the slot full.
+ */
+static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa)
{
- struct idr_layer *pa[MAX_IDR_LEVEL];
- int id;
-
- id = idr_get_empty_slot(idp, starting_id, pa);
- if (id >= 0) {
- /*
- * Successfully found an empty slot. Install the user
- * pointer and mark the slot full.
- */
- rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
- (struct idr_layer *)ptr);
- pa[0]->count++;
- idr_mark_full(pa, id);
- }
-
- return id;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
+ pa[0]->count++;
+ idr_mark_full(pa, id);
}
/**
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
+ struct idr_layer *pa[MAX_IDR_LEVEL];
int rv;
- rv = idr_get_new_above_int(idp, ptr, starting_id);
- /*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
- */
+ rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
if (rv < 0)
- return _idr_rc_to_errno(rv);
+ return rv == -ENOMEM ? -EAGAIN : rv;
+
+ idr_fill_slot(ptr, rv, pa);
*id = rv;
return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
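+
+/*
+ * For reference, the legacy interface above is used in a retry loop like
+ * the sketch below, refilling the per-idr freelist on -EAGAIN (my_idr,
+ * my_lock and ptr are hypothetical):
+ *
+ *	int id, ret;
+ *
+ *	do {
+ *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
+ *			return -ENOMEM;
+ *		spin_lock(&my_lock);
+ *		ret = idr_get_new_above(&my_idr, ptr, 0, &id);
+ *		spin_unlock(&my_lock);
+ *	} while (ret == -EAGAIN);
+ */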
/**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
*
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
+ * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end(). Note that preemption is disabled while preloaded.
*
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading. This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
*
- * @id returns a value in the range %0 ... %0x7fffffff
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ * idr_preload(GFP_KERNEL);
+ * spin_lock(lock);
+ *
+ * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
+ *
+ * spin_unlock(lock);
+ * idr_preload_end();
+ * if (id < 0)
+ * error;
*/
-int idr_get_new(struct idr *idp, void *ptr, int *id)
+void idr_preload(gfp_t gfp_mask)
{
- int rv;
+ /*
+ * Consuming preload buffer from non-process context breaks preload
+ * allocation guarantee. Disallow usage from those contexts.
+ */
+ WARN_ON_ONCE(in_interrupt());
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ preempt_disable();
- rv = idr_get_new_above_int(idp, ptr, 0);
/*
- * This is a cheap hack until the IDR code can be fixed to
- * return proper error values.
+ * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+ * return value from idr_alloc() needs to be checked for failure
+ * anyway. Silently give up if allocation fails. The caller can
+ * treat failures from idr_alloc() as if idr_alloc() were called
+ * with @gfp_mask which should be enough.
*/
- if (rv < 0)
- return _idr_rc_to_errno(rv);
- *id = rv;
- return 0;
+ while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+ struct idr_layer *new;
+
+ preempt_enable();
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+ preempt_disable();
+ if (!new)
+ break;
+
+ /* link the new one to per-cpu preload list */
+ new->ary[0] = __this_cpu_read(idr_preload_head);
+ __this_cpu_write(idr_preload_head, new);
+ __this_cpu_inc(idr_preload_cnt);
+ }
+}
+EXPORT_SYMBOL(idr_preload);
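+
+/*
+ * idr_preload_end() ends the preloaded section; it is expected to be a
+ * plain preempt_enable() in the header, matching the preempt_disable()
+ * above so the per-cpu buffer cannot be consumed by another task in
+ * between.
+ */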
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr. If no ID is
+ * available in the specified range, returns -ENOSPC. On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0. This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr. However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in an RCU-safe way after removal from the idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
+ struct idr_layer *pa[MAX_IDR_LEVEL];
+ int id;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ /* sanity checks */
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+ if (unlikely(max < start))
+ return -ENOSPC;
+
+ /* allocate id */
+ id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+ if (unlikely(id < 0))
+ return id;
+ if (unlikely(id > max))
+ return -ENOSPC;
+
+ idr_fill_slot(ptr, id, pa);
+ return id;
}
-EXPORT_SYMBOL(idr_get_new);
+EXPORT_SYMBOL_GPL(idr_alloc);
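+
+/*
+ * As noted above, read-only accesses may run under RCU while updates are
+ * synchronized externally.  A minimal reader sketch, assuming a
+ * hypothetical my_idr whose objects are freed with kfree_rcu() after
+ * idr_remove():
+ *
+ *	struct my_obj *obj;
+ *
+ *	rcu_read_lock();
+ *	obj = idr_find(&my_idr, id);
+ *	if (obj)
+ *		use_obj(obj);
+ *	rcu_read_unlock();
+ */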
static void idr_remove_warning(int id)
{
}
EXPORT_SYMBOL(idr_remove);
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * idr_destroy() only frees up unused, cached idp_layers, but this
- * function will remove all id mappings and leave all idp_layers
- * unused.
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
}
idp->layers = 0;
}
-EXPORT_SYMBOL(idr_remove_all);
+EXPORT_SYMBOL(__idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
* @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers. After this function, @idp is
+ * completely unused and can be freed / recycled. The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
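+ *
+ * For example (my_idr and my_free_fn are hypothetical):
+ *
+ *	idr_for_each(&my_idr, my_free_fn, NULL);
+ *	idr_destroy(&my_idr);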
*/
void idr_destroy(struct idr *idp)
{
+ __idr_remove_all(idp);
+
while (idp->id_free_cnt) {
struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
return p;
}
- id += 1 << n;
+ /*
+ * Proceed to the next layer at the current level. Unlike
+ * idr_for_each(), @id isn't guaranteed to be aligned to
+ * layer boundary at this point and adding 1 << n may
+ * incorrectly skip IDs. Make sure we jump to the
+ * beginning of the next layer using round_up().
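+	 *
+	 * For example, on 64-bit (IDR_BITS == 6), if @id is 65 here with
+	 * n == 6, the old "id += 1 << n" moved to 129 and skipped id 128,
+	 * while round_up(66, 64) correctly yields 128.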
+ */
+ id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
restart:
/* get vacant slot */
- t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+ t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0)
- return _idr_rc_to_errno(t);
+ return t == -ENOMEM ? -EAGAIN : t;
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
}
EXPORT_SYMBOL(ida_get_new_above);
-/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
- *
- * Allocate new ID. It should be called with any required locks.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return %-ENOSPC.
- *
- * @p_id returns a value in the range %0 ... %0x7fffffff.
- */
-int ida_get_new(struct ida *ida, int *p_id)
-{
- return ida_get_new_above(ida, 0, p_id);
-}
-EXPORT_SYMBOL(ida_get_new);
-
/**
* ida_remove - remove the given ID
* @ida: ida handle