/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * Statistics for the memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CGROUP_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spinlock to protect the per cgroup LRU lists
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)

/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
					/* mapped and cached states     */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	VM_BUG_ON(!irqs_disabled());

	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat,
					MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

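/*
 * Associate a newly created mm_struct with the mem_cgroup of the given
 * task. Takes a reference on the cgroup's css; mm_free_cgroup() drops it
 * when the mm goes away.
 */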
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario.
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page has already been tied to a page_cgroup.
 * On success, returns 0.
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* The page is tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

/*
 * Clear page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" value is different from the one in page->page_cgroup,
 * page->page_cgroup is not cleared.
 * Returns the value page->page_cgroup had when the lock was taken, so a
 * caller can detect a failed clear by checking
 * clear_page_cgroup(page, pc) == pc.
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	struct page_cgroup *ret;
	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}

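/*
 * Move a page_cgroup between the active and inactive lists of its cgroup
 * and update its ACTIVE flag. The caller must hold mem_cgroup->lru_lock.
 */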
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active) {
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	} else {
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;
	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}

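/*
 * Isolate pages for reclaim from this cgroup's active or inactive list.
 * Up to nr_to_scan entries are examined; pages that belong to zone z and
 * can be isolated from the LRU are moved onto dst. Returns the number of
 * pages taken and stores the number scanned in *scanned.
 */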
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		VM_BUG_ON(!pc);
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it.
	 */
retry:
	if (page) {
		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		/*
		 * The page_cgroup exists and
		 * the page has already been accounted.
		 */
		if (pc) {
			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
				/* is this page currently being uncharged? */
				unlock_page_cgroup(page);
				cpu_relax();
				goto retry;
			} else {
				unlock_page_cgroup(page);
				goto done;
			}
		}
		unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set; if so, charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment the reference
	 * count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->page_cgroup, increment the refcnt.... just
		 * retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	/* Update statistics vector */
	mem_cgroup_charge_statistics(mem, pc->flags, true);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}

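/*
 * Charge a page that is (or will be) mapped into user space, i.e. RSS.
 */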
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

/*
 * See if cached pages should be charged at all; this depends on the
 * cgroup's control_type.
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	int ret = 0;
	struct mem_cgroup *mem;
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	css_get(&mem->css);
	rcu_read_unlock();
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
	css_put(&mem->css);
	return ret;
}

/*
 * Uncharging is always a welcome operation; we never complain, we
 * simply uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This handles the case where a page is not charged at all and we
	 * are switching between handling the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * Get page->page_cgroup and clear it under the lock.
		 * force_empty can drop page->page_cgroup without checking
		 * the refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * There is no race with the uncharge() routines because the page_cgroup
 * for *page* holds an extra reference taken by
 * mem_cgroup_prepare_migration().
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;
	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
	return;
}

/*
 * This routine traverses the page_cgroups on the given list and drops
 * them all. It ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			list_del_init(&pc->lru);
			mem_cgroup_charge_statistics(mem, pc->flags, false);
			kfree(pc);
		} else	/* being uncharged? ...do relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
	return;
}

/*
 * Make the mem_cgroup's charge 0 if there is no task attached.
 * This enables deleting this mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroup in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroup in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}

static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

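/*
 * Handler for writes to the "control_type" file: parse a decimal value
 * and update mem->control_type if it is a valid MEM_CGROUP_TYPE_* value.
 */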
static ssize_t mem_control_type_write(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			const char __user *userbuf,
			size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}

static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}

static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;
	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: This should be removed if cgroup supports write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
};

static struct mem_cgroup init_mem_cgroup;

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));
}

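/*
 * The cgroup "attach" callback: when a thread group leader moves to a new
 * cgroup, point its mm->mem_cgroup at the new group and fix up the css
 * reference counts.
 */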
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct
	 * is in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
	return;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 1,
};