#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
        pc->flags = 0;
        pc->mem_cgroup = NULL;
        pc->page = pfn_to_page(pfn);
        INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;
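/*
 * Without SPARSEMEM, page_cgroup is kept in one flat array per node;
 * with SPARSEMEM, a per-section table is attached to each mem_section.
 */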
#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        pgdat->node_page_cgroup = NULL;
}
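/* Flat layout: index the node's array by pfn relative to node_start_pfn. */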
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long offset;
        struct page_cgroup *base;

        base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
        if (!base)
                return NULL;

        offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
        return base + offset;
}
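/* Allocate one page_cgroup per spanned page of the node, from bootmem. */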
static int __init alloc_node_page_cgroup(int nid)
{
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        unsigned long start_pfn, nr_pages, index;

        start_pfn = NODE_DATA(nid)->node_start_pfn;
        nr_pages = NODE_DATA(nid)->node_spanned_pages;

        if (!nr_pages)
                return 0;

        table_size = sizeof(struct page_cgroup) * nr_pages;

        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!base)
                return -ENOMEM;
        for (index = 0; index < nr_pages; index++) {
                pc = base + index;
                __init_page_cgroup(pc, start_pfn + index);
        }
        NODE_DATA(nid)->node_page_cgroup = base;
        total_usage += table_size;
        return 0;
}
void __init page_cgroup_init(void)
{
        int nid, fail;

        if (mem_cgroup_subsys.disabled)
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_cgroup(nid);
                if (fail)
                        goto fail;
        }
        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try cgroup_disable=memory option if you"
                " don't want memory cgroups\n");
        return;
fail:
        printk(KERN_CRIT "allocation of page_cgroup failed.\n");
        printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
        panic("Out of memory");
}
#else /* CONFIG_SPARSEMEM */
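/*
 * Sparse layout: section->page_cgroup is stored pre-biased by the section's
 * start pfn (see init_section_page_cgroup), so indexing with the full pfn
 * lands on the right entry.
 */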
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);

        return section->page_cgroup + pfn;
}
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        int nid, index;

        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
                if (slab_is_available()) {
                        base = kmalloc_node(table_size, GFP_KERNEL, nid);
                        if (!base)
                                base = vmalloc_node(table_size, nid);
                } else {
                        base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
                                table_size,
                                PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
                }
        } else {
                /*
                 * We don't have to allocate page_cgroup again, but the
                 * address of the memmap may have changed, so we have to
                 * initialize it again.
                 */
                base = section->page_cgroup + pfn;
                table_size = 0;
                /* check whether the address of the memmap has changed */
                if (base->page == pfn_to_page(pfn))
                        return 0;
        }

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                __init_page_cgroup(pc, pfn + index);
        }

        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
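/*
 * Free one section's table. It may have come from kmalloc, vmalloc, or
 * bootmem (see init_section_page_cgroup); pick the matching release path.
 * Bootmem tables are reserved pages and are never freed.
 */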
void __free_page_cgroup(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_cgroup *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_cgroup)
                return;
        base = ms->page_cgroup + pfn;
        if (is_vmalloc_addr(base)) {
                vfree(base);
                ms->page_cgroup = NULL;
        } else {
                struct page *page = virt_to_page(base);
                if (!PageReserved(page)) { /* Is bootmem ? */
                        kfree(base);
                        ms->page_cgroup = NULL;
                }
        }
}
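/*
 * page_cgroup tables are allocated per memory section, so round the
 * hot-added range out to section boundaries before initializing it.
 */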
int __meminit online_page_cgroup(unsigned long start_pfn,
                        unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);

        return -ENOMEM;
}
int __meminit offline_page_cgroup(unsigned long start_pfn,
                unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = start_pfn & ~(PAGES_PER_SECTION - 1);
        end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_cgroup(pfn);
        return 0;
}
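/*
 * Memory hotplug notifier: allocate tables at MEM_GOING_ONLINE so a failure
 * can still cancel the online, and free them only after MEM_OFFLINE.
 */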
static int __meminit page_cgroup_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
                offline_page_cgroup(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        ret = notifier_from_errno(ret);
        return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
void __init page_cgroup_init(void)
{
        unsigned long pfn;
        int fail = 0;

        if (mem_cgroup_subsys.disabled)
                return;

        for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
                if (!pfn_present(pfn))
                        continue;
                fail = init_section_page_cgroup(pfn);
        }
        if (fail) {
                printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
                panic("Out of memory");
        } else {
                hotplug_memory_notifier(page_cgroup_callback, 0);
        }

        printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
        printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
                " want memory cgroups\n");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
        return;
}

#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

/*
 * These 8 bytes seem big; maybe we can reduce this once an "id" can be used
 * for the cgroup rather than a pointer.
 */
struct swap_cgroup {
        struct mem_cgroup       *val;
};
#define SC_PER_PAGE     (PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK     (SC_PER_PAGE - 1)
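/*
 * Worked example (assuming 4096-byte pages and 8-byte pointers): SC_PER_PAGE
 * is 512, so swap offset 1000 maps to map page 1000 / 512 = 1, slot
 * 1000 & 511 = 488 within that page.
 */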
/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of this entry and no race.
 * Therefore, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
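/*
 * Illustrative call flow (a sketch, not a normative API contract): memcg
 * charge code records ownership with swap_cgroup_record(ent, mem) while the
 * SwapCache page is locked; at swap_free() time the entry is read back and
 * cleared with swap_cgroup_record(ent, NULL), with no further users racing.
 */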
/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        if (!do_swap_account)
                return 0;
        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);
        return -ENOMEM;
}
/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @mem: mem_cgroup to be recorded
 *
 * Returns old value at success, NULL at failure.
 * (Of course, old value can be NULL.)
 */
struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        struct mem_cgroup *old;

        if (!do_swap_account)
                return NULL;

        ctrl = &swap_cgroup_ctrl[type];

        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        old = sc->val;
        sc->val = mem;

        return old;
}
/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns pointer to mem_cgroup at success. NULL at failure.
 */
struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
{
        int type = swp_type(ent);
        unsigned long offset = swp_offset(ent);
        unsigned long idx = offset / SC_PER_PAGE;
        unsigned long pos = offset & SC_POS_MASK;
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;
        struct mem_cgroup *ret;

        if (!do_swap_account)
                return NULL;

        ctrl = &swap_cgroup_ctrl[type];
        mappage = ctrl->map[idx];
        sc = page_address(mappage);
        sc += pos;
        ret = sc->val;
        return ret;
}
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = ((max_pages/SC_PER_PAGE) + 1);
        array_size = length * sizeof(void *);

        array = vmalloc(array_size);
        if (!array)
                goto nomem;

        memset(array, 0, array_size);
        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                vfree(array);
                mutex_unlock(&swap_cgroup_mutex);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        printk(KERN_INFO
                "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
                " and %ld bytes to hold mem_cgroup pointers on swap\n",
                array_size, length * PAGE_SIZE);
        printk(KERN_INFO
                "swap_cgroup can be disabled by noswapaccount boot option.\n");
        return 0;
nomem:
        printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
        printk(KERN_INFO
                "swap_cgroup can be disabled by noswapaccount boot option\n");
        return -ENOMEM;
}
void swap_cgroup_swapoff(int type)
{
        int i;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        if (ctrl->map) {
                for (i = 0; i < ctrl->length; i++) {
                        struct page *page = ctrl->map[i];
                        if (page)
                                __free_page(page);
                }
                vfree(ctrl->map);
                ctrl->map = NULL;
                ctrl->length = 0;
        }
        mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */