diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 714459a8dc108e94a4fd38dc80ee070c6b2a24f7..61261195f5b60b4c143adbca5967f979372798c5 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
 #define ODEBUG_CHUNK_SIZE      (1 << ODEBUG_CHUNK_SHIFT)
 #define ODEBUG_CHUNK_MASK      (~(ODEBUG_CHUNK_SIZE - 1))
 
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX   1024
+#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
+
 struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
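
As a quick sanity check of the rate cap described in the new comment above: the worker is scheduled no more often than every DIV_ROUND_UP(HZ, 10) jiffies and frees about ODEBUG_FREE_WORK_MAX objects per run. A minimal userspace sketch of that arithmetic, assuming HZ = 1000 purely for illustration (the real value is config dependent):

	/* Userspace sketch: replay the freeing-rate arithmetic from the comment. */
	#include <stdio.h>

	#define HZ                     1000	/* assumed for this sketch; config dependent */
	#define ODEBUG_FREE_WORK_MAX   1024
	#define ODEBUG_FREE_WORK_DELAY (((HZ) + 10 - 1) / 10)	/* DIV_ROUND_UP(HZ, 10) */

	int main(void)
	{
		/* The worker is scheduled at most once per ODEBUG_FREE_WORK_DELAY jiffies. */
		int runs_per_sec = HZ / ODEBUG_FREE_WORK_DELAY;		/* ~10 */
		int objs_per_sec = runs_per_sec * ODEBUG_FREE_WORK_MAX;	/* ~10240 */

		printf("worker runs/sec: %d, max objects freed/sec: %d\n",
		       runs_per_sec, objs_per_sec);
		return 0;
	}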
@@ -68,6 +76,7 @@ static int                    obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int                     obj_pool_free = ODEBUG_POOL_SIZE;
 static int                     obj_pool_used;
 static int                     obj_pool_max_used;
+static bool                    obj_freeing;
 /* The number of objs on the global free list */
 static int                     obj_nr_tofree;
 
@@ -91,7 +100,7 @@ static int                   debug_objects_allocated;
 static int                     debug_objects_freed;
 
 static void free_obj_work(struct work_struct *work);
-static DECLARE_WORK(debug_obj_work, free_obj_work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 
 static int __init enable_object_debug(char *str)
 {
@@ -120,7 +129,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-       struct debug_obj *new, *obj;
+       struct debug_obj *obj;
        unsigned long flags;
 
        if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -136,7 +145,7 @@ static void fill_pool(void)
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
-               if (obj_nr_tofree) {
+               while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        obj_nr_tofree--;
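
The fill_pool() hunk above keeps the existing pattern of peeking at obj_nr_tofree locklessly and rechecking under pool_lock, but now keeps pulling objects until the pool reaches obj_pool_min_free instead of grabbing a single one. A minimal userspace analogue of that peek-then-recheck pattern, with a pthread mutex standing in for pool_lock and all names invented for the sketch:

	/* Userspace analogue of the lockless peek + locked recheck in fill_pool(). */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_tofree = 8;		/* stand-in for obj_nr_tofree */
	static int pool_free = 2;		/* stand-in for obj_pool_free */
	static const int pool_min_free = 5;	/* stand-in for obj_pool_min_free */

	static void refill_from_free_list(void)
	{
		/* Cheap peek without the lock ... */
		if (!nr_tofree)
			return;

		pthread_mutex_lock(&pool_lock);
		/*
		 * ... then recheck under the lock: the worker may have emptied
		 * the free list between the peek and the lock acquisition.
		 */
		while (nr_tofree && pool_free < pool_min_free) {
			nr_tofree--;
			pool_free++;
		}
		pthread_mutex_unlock(&pool_lock);
	}

	int main(void)
	{
		refill_from_free_list();
		printf("pool_free=%d nr_tofree=%d\n", pool_free, nr_tofree);
		return 0;
	}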
@@ -150,15 +159,23 @@ static void fill_pool(void)
                return;
 
        while (obj_pool_free < debug_objects_pool_min_level) {
+               struct debug_obj *new[ODEBUG_BATCH_SIZE];
+               int cnt;
 
-               new = kmem_cache_zalloc(obj_cache, gfp);
-               if (!new)
+               for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+                       new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+                       if (!new[cnt])
+                               break;
+               }
+               if (!cnt)
                        return;
 
                raw_spin_lock_irqsave(&pool_lock, flags);
-               hlist_add_head(&new->node, &obj_pool);
-               debug_objects_allocated++;
-               obj_pool_free++;
+               while (cnt) {
+                       hlist_add_head(&new[--cnt]->node, &obj_pool);
+                       debug_objects_allocated++;
+                       obj_pool_free++;
+               }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
 }
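
fill_pool() now allocates up to ODEBUG_BATCH_SIZE objects from the slab cache before taking pool_lock and links the whole batch in a single critical section, instead of one lock round-trip per object, so the lock hold time stays short while the (possibly slow) allocations run unlocked. A hedged userspace sketch of that allocate-outside/link-under-lock shape; BATCH_SIZE, struct obj and the list handling are stand-ins, not the kernel code:

	/* Userspace sketch: allocate a batch outside the lock, link it under the lock. */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BATCH_SIZE 16			/* stand-in for ODEBUG_BATCH_SIZE */

	struct obj { struct obj *next; };	/* stand-in for struct debug_obj */

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct obj *pool;
	static int pool_free;

	static void fill_pool_batched(int min_level)
	{
		while (pool_free < min_level) {
			struct obj *new[BATCH_SIZE];
			int cnt;

			/* The allocations happen without the lock held. */
			for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
				new[cnt] = calloc(1, sizeof(*new[cnt]));
				if (!new[cnt])
					break;
			}
			if (!cnt)
				return;		/* total allocation failure: give up */

			/* One short critical section links the whole batch. */
			pthread_mutex_lock(&pool_lock);
			while (cnt) {
				new[--cnt]->next = pool;
				pool = new[cnt];
				pool_free++;
			}
			pthread_mutex_unlock(&pool_lock);
		}
	}

	int main(void)
	{
		fill_pool_batched(32);
		printf("pool_free=%d\n", pool_free);
		return 0;
	}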
@@ -274,13 +291,19 @@ static void free_obj_work(struct work_struct *work)
        unsigned long flags;
        HLIST_HEAD(tofree);
 
+       WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;
 
+       if (obj_pool_free >= debug_objects_pool_size)
+               goto free_objs;
+
        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck if pool list it full or not, if not fill pool
-        * list from the global free list
+        * list from the global free list. As it is likely that a workload
+        * may be gearing up to use more and more objects, don't free any
+        * of them until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
@@ -289,7 +312,10 @@ static void free_obj_work(struct work_struct *work)
                obj_pool_free++;
                obj_nr_tofree--;
        }
+       raw_spin_unlock_irqrestore(&pool_lock, flags);
+       return;
 
+free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
@@ -308,7 +334,7 @@ static void free_obj_work(struct work_struct *work)
        }
 }
 
-static bool __free_object(struct debug_obj *obj)
+static void __free_object(struct debug_obj *obj)
 {
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
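
Taken together with the free_objs label added above, free_obj_work() now has two distinct exits: if obj_pool is still below debug_objects_pool_size it only refills the pool from obj_to_free and returns, so a workload that is ramping up keeps its objects; otherwise it detaches the surplus onto a local list and frees it after dropping pool_lock. A rough userspace rendering of that shape (all names and sizes are stand-ins):

	/* Userspace rendering of the two free_obj_work() paths. */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj { struct obj *next; };

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct obj *pool, *to_free;	/* stand-ins for obj_pool / obj_to_free */
	static int pool_free, nr_tofree;
	static const int pool_size = 4;		/* stand-in for debug_objects_pool_size */

	static void free_obj_work_sketch(void)
	{
		struct obj *tofree = NULL;	/* local list, released outside the lock */

		pthread_mutex_lock(&pool_lock);
		if (pool_free < pool_size) {
			/* Path 1: pool not full yet - refill it, free nothing this round. */
			while (nr_tofree && pool_free < pool_size) {
				struct obj *o = to_free;

				to_free = o->next;
				o->next = pool;
				pool = o;
				nr_tofree--;
				pool_free++;
			}
			pthread_mutex_unlock(&pool_lock);
			return;
		}
		/* Path 2: pool already full - detach the whole free list ... */
		tofree = to_free;
		to_free = NULL;
		nr_tofree = 0;
		pthread_mutex_unlock(&pool_lock);

		/* ... and release the memory without holding the lock. */
		while (tofree) {
			struct obj *o = tofree;

			tofree = o->next;
			free(o);
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 8; i++) {	/* pretend eight objects await freeing */
			struct obj *o = calloc(1, sizeof(*o));

			if (!o)
				break;
			o->next = to_free;
			to_free = o;
			nr_tofree++;
		}
		free_obj_work_sketch();		/* refills the pool first */
		free_obj_work_sketch();		/* pool is full now, frees the rest */
		printf("pool_free=%d nr_tofree=%d\n", pool_free, nr_tofree);
		return 0;
	}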
@@ -328,7 +354,7 @@ static bool __free_object(struct debug_obj *obj)
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
-               return false;
+               return;
        }
 
        /*
@@ -344,7 +370,8 @@ static bool __free_object(struct debug_obj *obj)
 
 free_to_obj_pool:
        raw_spin_lock(&pool_lock);
-       work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
+       work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+              (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;
 
        if (work) {
@@ -358,6 +385,21 @@ free_to_obj_pool:
                                               &obj_to_free);
                        }
                }
+
+               if ((obj_pool_free > debug_objects_pool_size) &&
+                   (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+                       int i;
+
+                       /*
+                        * Free one more batch of objects from obj_pool.
+                        */
+                       for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+                               obj = __alloc_object(&obj_pool);
+                               hlist_add_head(&obj->node, &obj_to_free);
+                               obj_pool_free--;
+                               obj_nr_tofree++;
+                       }
+               }
        } else {
                obj_pool_free++;
                hlist_add_head(&obj->node, &obj_pool);
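
The block just added lets __free_object() move one extra batch of ODEBUG_BATCH_SIZE objects from obj_pool onto obj_to_free whenever the pool is still above debug_objects_pool_size and the worker's backlog is below ODEBUG_FREE_WORK_MAX. The bound is checked once before the whole batch is moved, so obj_nr_tofree can overshoot the limit by roughly a batch, which the 10Hz worker then absorbs. A small illustration of that bounding condition, with invented numbers:

	/* Illustration of the ODEBUG_FREE_WORK_MAX bound on the lookahead batch move. */
	#include <stdio.h>

	#define BATCH_SIZE     16	/* stand-in for ODEBUG_BATCH_SIZE */
	#define FREE_WORK_MAX  1024	/* stand-in for ODEBUG_FREE_WORK_MAX */

	int main(void)
	{
		int pool_free = 1200, pool_size = 1024, nr_tofree = 1016;

		/* The bound is checked once, then a full batch is moved. */
		if (pool_free > pool_size && nr_tofree < FREE_WORK_MAX) {
			int i;

			for (i = 0; i < BATCH_SIZE; i++) {
				pool_free--;	/* object leaves obj_pool ... */
				nr_tofree++;	/* ... and joins obj_to_free */
			}
		}
		/* Prints 1184 and 1032: a modest overshoot past FREE_WORK_MAX. */
		printf("pool_free=%d nr_tofree=%d\n", pool_free, nr_tofree);
		return 0;
	}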
@@ -372,7 +414,6 @@ free_to_obj_pool:
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
-       return work;
 }
 
 /*
@@ -381,8 +422,11 @@ free_to_obj_pool:
  */
 static void free_object(struct debug_obj *obj)
 {
-       if (__free_object(obj))
-               schedule_work(&debug_obj_work);
+       __free_object(obj);
+       if (!obj_freeing && obj_nr_tofree) {
+               WRITE_ONCE(obj_freeing, true);
+               schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+       }
 }
 
 /*
@@ -484,6 +528,7 @@ static void
 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 {
        enum debug_obj_state state;
+       bool check_stack = false;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
@@ -503,7 +548,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
                        debug_objects_oom();
                        return;
                }
-               debug_object_is_on_stack(addr, onstack);
+               check_stack = true;
        }
 
        switch (obj->state) {
@@ -514,20 +559,23 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
                break;
 
        case ODEBUG_STATE_ACTIVE:
-               debug_print_object(obj, "init");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
+               debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;
 
        case ODEBUG_STATE_DESTROYED:
+               raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
-               break;
+               return;
        default:
                break;
        }
 
        raw_spin_unlock_irqrestore(&db->lock, flags);
+       if (check_stack)
+               debug_object_is_on_stack(addr, onstack);
 }
 
 /**
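
The __debug_object_init() hunk above shows the pattern repeated throughout the rest of the patch: debug_print_object() (and here debug_object_is_on_stack()) is no longer called under db->lock; the relevant state is captured while the lock is held and the printing happens after raw_spin_unlock_irqrestore(). A minimal userspace analogue of that capture-under-lock, report-after-unlock pattern, with a pthread mutex and made-up state values standing in for the real structures:

	/* Userspace analogue: capture state under the lock, print after unlocking. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;

	struct tracked { int state; };	/* stand-in for struct debug_obj */

	static void check_object(struct tracked *obj)
	{
		bool print_object = false;
		int state = 0;

		pthread_mutex_lock(&db_lock);
		/* Record what needs reporting while the lock is held ... */
		if (obj->state != 0) {
			state = obj->state;
			print_object = true;
		}
		pthread_mutex_unlock(&db_lock);

		/* ... and do the potentially slow printing after unlocking. */
		if (print_object)
			printf("object %p in unexpected state %d\n", (void *)obj, state);
	}

	int main(void)
	{
		struct tracked t = { .state = 2 };

		check_object(&t);
		return 0;
	}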
@@ -585,6 +633,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 
        obj = lookup_object(addr, db);
        if (obj) {
+               bool print_object = false;
+
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
@@ -593,14 +643,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
                        break;
 
                case ODEBUG_STATE_ACTIVE:
-                       debug_print_object(obj, "activate");
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
+                       debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;
 
                case ODEBUG_STATE_DESTROYED:
-                       debug_print_object(obj, "activate");
+                       print_object = true;
                        ret = -EINVAL;
                        break;
                default:
@@ -608,10 +658,13 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
+               if (print_object)
+                       debug_print_object(obj, "activate");
                return ret;
        }
 
        raw_spin_unlock_irqrestore(&db->lock, flags);
+
        /*
         * We are here when a static object is activated. We
         * let the type specific code confirm whether this is
@@ -643,6 +696,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
+       bool print_object = false;
 
        if (!debug_objects_enabled)
                return;
@@ -660,24 +714,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
-                               debug_print_object(obj, "deactivate");
+                               print_object = true;
                        break;
 
                case ODEBUG_STATE_DESTROYED:
-                       debug_print_object(obj, "deactivate");
+                       print_object = true;
                        break;
                default:
                        break;
                }
-       } else {
+       }
+
+       raw_spin_unlock_irqrestore(&db->lock, flags);
+       if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };
 
                debug_print_object(&o, "deactivate");
+       } else if (print_object) {
+               debug_print_object(obj, "deactivate");
        }
-
-       raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
@@ -692,6 +749,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
+       bool print_object = false;
 
        if (!debug_objects_enabled)
                return;
@@ -711,20 +769,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
-               debug_print_object(obj, "destroy");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
+               debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;
 
        case ODEBUG_STATE_DESTROYED:
-               debug_print_object(obj, "destroy");
+               print_object = true;
                break;
        default:
                break;
        }
 out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
+       if (print_object)
+               debug_print_object(obj, "destroy");
 }
 EXPORT_SYMBOL_GPL(debug_object_destroy);
 
@@ -753,9 +813,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 
        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
-               debug_print_object(obj, "free");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
+               debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
@@ -828,6 +888,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
+       bool print_object = false;
 
        if (!debug_objects_enabled)
                return;
@@ -843,22 +904,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
-                               debug_print_object(obj, "active_state");
+                               print_object = true;
                        break;
 
                default:
-                       debug_print_object(obj, "active_state");
+                       print_object = true;
                        break;
                }
-       } else {
+       }
+
+       raw_spin_unlock_irqrestore(&db->lock, flags);
+       if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };
 
                debug_print_object(&o, "active_state");
+       } else if (print_object) {
+               debug_print_object(obj, "active_state");
        }
-
-       raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_active_state);
 
@@ -872,7 +936,6 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;
-       bool work = false;
 
        saddr = (unsigned long) address;
        eaddr = saddr + size;
@@ -894,16 +957,16 @@ repeat:
 
                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
-                               debug_print_object(obj, "free");
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
+                               debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
-                               work |= __free_object(obj);
+                               __free_object(obj);
                                break;
                        }
                }
@@ -919,8 +982,10 @@ repeat:
                debug_objects_maxchecked = objs_checked;
 
        /* Schedule work to actually kmem_cache_free() objects */
-       if (work)
-               schedule_work(&debug_obj_work);
+       if (!obj_freeing && obj_nr_tofree) {
+               WRITE_ONCE(obj_freeing, true);
+               schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+       }
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)