diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 15af3da5af02f6acbccff3551a71461a4f4396d5..d5f3a62887cf958f6b657c0f542f0cf2c3e86e8d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy)
        struct memcg_stock_pcp *stock;
        unsigned long flags;
 
+       /*
+        * The only protection from memory hotplug vs. drain_stock races is
+        * that we always operate on local CPU stock here with IRQ disabled
+        */
        local_irq_save(flags);
 
        stock = this_cpu_ptr(&memcg_stock);
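
The comment added above is the entire synchronization story for the hotplug
race: the drain worker and the cpu-dead callback each touch only the stock of
the CPU they run on, with interrupts off, so no cross-CPU writer can
interleave. Below is a minimal kernel-style sketch of that pattern; my_stock
and my_drain_local() are illustrative names, not symbols from memcontrol.c.

    #include <linux/percpu.h>
    #include <linux/irqflags.h>

    struct my_stock {
            unsigned long nr_pages;          /* illustrative cached count */
    };
    static DEFINE_PER_CPU(struct my_stock, my_stock);

    /* Always drains the stock of the CPU it executes on. */
    static void my_drain_local(void)
    {
            struct my_stock *stock;
            unsigned long flags;

            local_irq_save(flags);           /* no IRQs, so no preemption here */
            stock = this_cpu_ptr(&my_stock); /* the local CPU's copy, never remote */
            stock->nr_pages = 0;
            local_irq_restore(flags);
    }
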
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
        /* If someone's already draining, avoid adding more workers. */
        if (!mutex_trylock(&percpu_charge_mutex))
                return;
-       /* Notify other cpus that system-wide "drain" is running */
-       get_online_cpus();
+       /*
+        * Notify other cpus that system-wide "drain" is running.
+        * We do not care about races with cpu hotplug because cpu down
+        * as well as workers from this path always operate on the local
+        * per-cpu data. CPU up doesn't touch memcg_stock at all.
+        */
        curcpu = get_cpu();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                struct mem_cgroup *memcg;
 
                memcg = stock->cached;
-               if (!memcg || !stock->nr_pages)
+               if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
                        continue;
-               if (!mem_cgroup_is_descendant(memcg, root_memcg))
+               if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
+                       css_put(&memcg->css);
                        continue;
+               }
                if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)
                                drain_local_stock(&stock->work);
                        else
                                schedule_work_on(cpu, &stock->work);
                }
+               css_put(&memcg->css);
        }
        put_cpu();
-       put_online_cpus();
        mutex_unlock(&percpu_charge_mutex);
 }
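
Dropping get_online_cpus() leaves one hazard this hunk also has to close:
stock->cached is read from another CPU's stock, so the memcg it points to
could be freed while the descendant check runs and the flush work is queued.
The new css_tryget()/css_put() pair pins the memcg across exactly that
window. A stripped-down sketch of the pin-then-queue pattern follows; the
function name and parameters are illustrative, while css_tryget(), css_put()
and schedule_work_on() are the real kernel APIs used in the hunk.

    #include <linux/cgroup.h>
    #include <linux/memcontrol.h>
    #include <linux/workqueue.h>

    /*
     * Sketch: pin a memcg read from another CPU's cache before using it.
     * The reference keeps the memcg alive from a successful css_tryget()
     * until the matching css_put().
     */
    static void pin_and_queue_flush(struct mem_cgroup *memcg, int cpu,
                                    struct work_struct *work)
    {
            if (!memcg || !css_tryget(&memcg->css))
                    return;                  /* memcg already being torn down */

            schedule_work_on(cpu, work);     /* memcg is pinned while we queue */

            css_put(&memcg->css);            /* queued; drop the temporary pin */
    }

In the hunk above the same pin also covers mem_cgroup_is_descendant(), which
dereferences memcg, hence the css_put() on the early-continue path.
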
 
@@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
-       VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+       VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
+                       !PageHWPoison(page), page);
 
        if (!page->mem_cgroup)
                return;
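
The reworked assertion is a chain of negated conjuncts, which is hard to read
at a glance. By De Morgan it says: a page may still hold references at
uncharge time only if it is a ZONE_DEVICE page or has been hardware-poisoned.
A hypothetical helper (not part of memcontrol.c) spelling the condition out
positively:

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    /*
     * Hypothetical helper, logically equivalent to the negation of the
     * VM_BUG_ON_PAGE() condition above: a non-zero refcount is tolerated
     * only for ZONE_DEVICE and hw-poisoned pages.
     */
    static inline bool uncharge_page_count_ok(struct page *page)
    {
            return page_count(page) == 0 || is_zone_device_page(page) ||
                   PageHWPoison(page);
    }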