mm: memory.low hierarchical behavior
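
Teach the page counter to track protected ("low") usage hierarchically.
Each counter records low_usage, which equals its usage while that usage
still fits under the low limit and drops to 0 once the limit is
exceeded, and every parent accumulates the sum of its children's
low_usage values in children_low_usage. The charge and uncharge paths
keep these fields in sync through propagate_low_usage(), and the new
page_counter_set_low() re-derives them up the hierarchy whenever a low
limit changes.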
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 41937c9a9d11df8aff94d0f94e3e9d7da312b3a5..a5ff4cbc355a4d270dacfc556536c94999f9ad88 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
 #include <linux/bug.h>
 #include <asm/page.h>
 
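+/*
+ * Track the protected portion of this counter's usage: low_usage is
+ * the usage while it still fits under the low limit, and 0 once the
+ * limit is exceeded. The parent accumulates the sum of its children's
+ * low_usage values in children_low_usage.
+ */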
+static void propagate_low_usage(struct page_counter *c, unsigned long usage)
+{
+       unsigned long low_usage, old;
+       long delta;
+
+       if (!c->parent)
+               return;
+
+       if (!c->low && !atomic_long_read(&c->low_usage))
+               return;
+
+       if (usage <= c->low)
+               low_usage = usage;
+       else
+               low_usage = 0;
+
+       old = atomic_long_xchg(&c->low_usage, low_usage);
+       delta = low_usage - old;
+       if (delta)
+               atomic_long_add(delta, &c->parent->children_low_usage);
+}
+
 /**
  * page_counter_cancel - take pages out of the local counter
  * @counter: counter
@@ -23,6 +45,7 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
        long new;
 
        new = atomic_long_sub_return(nr_pages, &counter->usage);
+       propagate_low_usage(counter, new);
        /* More uncharges than charges? */
        WARN_ON_ONCE(new < 0);
 }
@@ -42,6 +65,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
                long new;
 
                new = atomic_long_add_return(nr_pages, &c->usage);
+               propagate_low_usage(c, new);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
@@ -85,6 +109,7 @@ bool page_counter_try_charge(struct page_counter *counter,
                new = atomic_long_add_return(nr_pages, &c->usage);
                if (new > c->max) {
                        atomic_long_sub(nr_pages, &c->usage);
+                       propagate_low_usage(c, new);
                        /*
                         * This is racy, but we can live with some
                         * inaccuracy in the failcnt.
@@ -93,6 +118,7 @@ bool page_counter_try_charge(struct page_counter *counter,
                        *fail = c;
                        goto failed;
                }
+               propagate_low_usage(c, new);
                /*
                 * Just like with failcnt, we can live with some
                 * inaccuracy in the watermark.
@@ -164,6 +190,23 @@ int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
        }
 }
 
+/**
+ * page_counter_set_low - set the amount of protected memory
+ * @counter: counter
+ * @nr_pages: value to set
+ *
+ * The caller must serialize invocations on the same counter.
+ */
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
+{
+       struct page_counter *c;
+
+       counter->low = nr_pages;
+
+       for (c = counter; c; c = c->parent)
+               propagate_low_usage(c, atomic_long_read(&c->usage));
+}
+
 /**
  * page_counter_memparse - memparse() for page counter limits
  * @buf: string to parse
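
To make the bookkeeping above concrete, here is a minimal userspace
sketch of the same scheme. It mirrors propagate_low_usage(),
page_counter_charge() and page_counter_set_low(), but drops the atomics
in favor of plain longs and runs single-threaded; the helpers charge()
and set_low() are simplified stand-ins, not the kernel functions. The
point is the invariant the propagation maintains: a parent's
children_low_usage equals the sum of its children's low_usage.

#include <assert.h>
#include <stdio.h>

struct page_counter {
	long usage;
	long low;
	long low_usage;             /* usage if usage <= low, else 0 */
	long children_low_usage;    /* sum of children's low_usage */
	struct page_counter *parent;
};

/* Mirror of the kernel's propagate_low_usage(), minus the atomics. */
static void propagate_low_usage(struct page_counter *c, long usage)
{
	long low_usage, old;

	if (!c->parent)
		return;
	if (!c->low && !c->low_usage)
		return;

	low_usage = (usage <= c->low) ? usage : 0;

	old = c->low_usage;
	c->low_usage = low_usage;
	c->parent->children_low_usage += low_usage - old;
}

/* Charge every counter on the path to the root, as page_counter_charge() does. */
static void charge(struct page_counter *counter, long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		c->usage += nr_pages;
		propagate_low_usage(c, c->usage);
	}
}

/* Change the low limit and re-derive low_usage up the hierarchy. */
static void set_low(struct page_counter *counter, long nr_pages)
{
	struct page_counter *c;

	counter->low = nr_pages;
	for (c = counter; c; c = c->parent)
		propagate_low_usage(c, c->usage);
}

int main(void)
{
	struct page_counter parent = { 0 };
	struct page_counter a = { .parent = &parent };
	struct page_counter b = { .parent = &parent };

	set_low(&a, 100);
	set_low(&b, 100);
	charge(&a, 80);     /* stays under a's low: fully protected */
	charge(&b, 150);    /* exceeds b's low: protection drops to 0 */

	assert(parent.children_low_usage == a.low_usage + b.low_usage);
	printf("a=%ld b=%ld children_low_usage=%ld\n",
	       a.low_usage, b.low_usage, parent.children_low_usage);
	return 0;
}

Charging 80 pages against a's low of 100 leaves a's full usage
protected, while charging 150 against b's low of 100 forfeits b's
protection entirely, so only a contributes to the parent's
children_low_usage.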