/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 * TODO: Consider making these lists per zone
	 */
	struct list_head active_list;
	struct list_head inactive_list;
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t lru_lock;
	unsigned long control_type;	/* control RSS or RSS+Pagecache */
};

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)

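/*
 * For example: a page_cgroup allocated at (say) 0x...1000 satisfies the
 * alignment requirement, so bit 0 of the pointer is free; while locked,
 * page->page_cgroup holds 0x...1001, and page_get_page_cgroup() below
 * recovers the pointer by masking off PAGE_CGROUP_LOCK.
 */
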
/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	atomic_t ref_cnt;		/* Helpful when pages move b/w  */
					/* mapped and cached states     */
};

enum {
	MEM_CGROUP_TYPE_UNSPEC = 0,
	MEM_CGROUP_TYPE_MAPPED,
	MEM_CGROUP_TYPE_CACHED,
	MEM_CGROUP_TYPE_ALL,
	MEM_CGROUP_TYPE_MAX,
};

static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static inline
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

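/*
 * Pin the owning task's mem_cgroup for the lifetime of the mm_struct;
 * the css reference taken here is dropped by mm_free_cgroup() below.
 */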
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
					&page->page_cgroup);
}

void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int locked;

	/*
	 * While resetting the page_cgroup we might not hold the
	 * page_cgroup lock. free_hot_cold_page() is an example
	 * of such a scenario
	 */
	if (pc)
		VM_BUG_ON(!page_cgroup_locked(page));
	locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
	page->page_cgroup = ((unsigned long)pc | locked);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *)
		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
	VM_BUG_ON(!page_cgroup_locked(page));
}

static void __always_inline unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

/*
 * Tie a new page_cgroup to the struct page under lock_page_cgroup().
 * This can fail if the page is already tied to a page_cgroup.
 * On success, returns 0.
 */
static inline int
page_cgroup_assign_new_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else	/* The page is tied to another pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

/*
 * Clear the page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" value differs from the current page->page_cgroup,
 * page->page_cgroup is not cleared.
 * Returns the value of page->page_cgroup at the time the lock was taken.
 * A caller can detect failure of clearing by checking whether
 * clear_page_cgroup(page, pc) == pc.
 */
static inline struct page_cgroup *
clear_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	struct page_cgroup *ret;
	/* lock and clear */
	lock_page_cgroup(page);
	ret = page_get_page_cgroup(page);
	if (likely(ret == pc))
		page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);
	return ret;
}

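/*
 * Move a page_cgroup between the cgroup's LRU lists. Callers must
 * already hold mem_cgroup->lru_lock (see mem_cgroup_move_lists() and
 * mem_cgroup_isolate_pages() below).
 */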
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	if (active)
		list_move(&pc->lru, &pc->mem_cgroup->active_list);
	else
		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
}

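/*
 * Test whether @task's mm is charged to @mem; the mm is read under
 * task_lock() so it cannot change under us.
 */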
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_cgroup(task->mm) == mem;
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	struct mem_cgroup *mem;

	if (!pc)
		return;

	mem = pc->mem_cgroup;

	spin_lock(&mem->lru_lock);
	__mem_cgroup_move_lists(pc, active);
	spin_unlock(&mem->lru_lock);
}

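/*
 * Scan up to @nr_to_scan page_cgroups from the tail of the cgroup's
 * active or inactive list and isolate the pages belonging to zone @z
 * onto @dst for reclaim. Pages found on the wrong LRU list are moved
 * to the correct one along the way. Returns the number of pages taken;
 * *@scanned reports how many entries were examined.
 */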
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;

	if (active)
		src = &mem_cont->active_list;
	else
		src = &mem_cont->inactive_list;

	spin_lock(&mem_cont->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		VM_BUG_ON(!pc);
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		/*
		 * Reclaim, per zone
		 * TODO: make the active/inactive lists per zone
		 */
		if (page_zone(page) != z)
			continue;

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mem_cont->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and the page has already been accounted.
	 */
	if (pc) {
		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
			/* is this page being uncharged? */
			unlock_page_cgroup(page);
			cpu_relax();
			goto retry;
		} else {
			unlock_page_cgroup(page);
			goto done;
		}
	}

	unlock_page_cgroup(page);

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	rcu_read_lock();
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set; if so, charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment the reference
	 * count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	/*
	 * If we created the page_cgroup, we should free it on exceeding
	 * the cgroup limit.
	 */
	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		bool is_atomic = gfp_mask & GFP_ATOMIC;
		/*
		 * We cannot reclaim under GFP_ATOMIC, fail the charge.
		 */
		if (is_atomic)
			goto noreclaim;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;
		/*
		 * Since we control both RSS and cache, we end up with a
		 * very interesting scenario where we end up reclaiming
		 * memory (essentially RSS), since the memory is pushed
		 * to swap cache, we eventually end up adding those
		 * pages back to our list. Hence we give ourselves a
		 * few chances before we fail.
		 */
		else if (nr_retries--) {
			congestion_wait(WRITE, HZ/10);
			continue;
		}
noreclaim:
		css_put(&mem->css);
		if (!is_atomic)
			mem_cgroup_out_of_memory(mem, GFP_KERNEL);
		goto free_pc;
	}

	atomic_set(&pc->ref_cnt, 1);
	pc->mem_cgroup = mem;
	pc->page = page;
	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
		/*
		 * Another charge was added to this page already; we take
		 * lock_page_cgroup(page) again, read page->page_cgroup and
		 * increment its refcnt... so just retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		goto retry;
	}

	spin_lock_irqsave(&mem->lru_lock, flags);
	list_add(&pc->lru, &mem->active_list);
	spin_unlock_irqrestore(&mem->lru_lock, flags);

done:
	return 0;
free_pc:
	kfree(pc);
err:
	return -ENOMEM;
}

/*
 * Check whether cached pages should be charged at all.
 */
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem;

	if (!mm)
		mm = &init_mm;

	mem = rcu_dereference(mm->mem_cgroup);
	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
		return mem_cgroup_charge(page, mm, gfp_mask);
	else
		return 0;
}

/*
 * Uncharging is always a welcome operation; we never complain, we
 * simply uncharge.
 */
void mem_cgroup_uncharge(struct page_cgroup *pc)
{
	struct mem_cgroup *mem;
	struct page *page;
	unsigned long flags;

	/*
	 * This can handle cases when a page is not charged at all and we
	 * are switching between handling the control_type.
	 */
	if (!pc)
		return;

	if (atomic_dec_and_test(&pc->ref_cnt)) {
		page = pc->page;
		/*
		 * Get page->page_cgroup and clear it under the lock.
		 * force_empty can drop page->page_cgroup without checking
		 * the refcnt.
		 */
		if (clear_page_cgroup(page, pc) == pc) {
			mem = pc->mem_cgroup;
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			spin_lock_irqsave(&mem->lru_lock, flags);
			list_del_init(&pc->lru);
			spin_unlock_irqrestore(&mem->lru_lock, flags);
			kfree(pc);
		}
	}
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;
	int ret = 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);
	mem_cgroup_uncharge(pc);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with the uncharge() routines because the
 * page_cgroup for *page* has an extra reference taken by
 * mem_cgroup_prepare_migration().
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
retry:
	pc = page_get_page_cgroup(page);
	if (!pc)
		return;
	if (clear_page_cgroup(page, pc) != pc)
		goto retry;
	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);
	unlock_page_cgroup(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops
 * them all. It ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself; it just removes
 * the page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
{
	struct page_cgroup *pc;
	struct page *page;
	int count;
	unsigned long flags;

retry:
	count = FORCE_UNCHARGE_BATCH;
	spin_lock_irqsave(&mem->lru_lock, flags);

	while (--count && !list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		/* Avoid race with charge */
		atomic_set(&pc->ref_cnt, 0);
		if (clear_page_cgroup(page, pc) == pc) {
			css_put(&mem->css);
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			list_del_init(&pc->lru);
			kfree(pc);
		} else	/* being uncharged? ...do relax */
			break;
	}
	spin_unlock_irqrestore(&mem->lru_lock, flags);
	if (!list_empty(list)) {
		cond_resched();
		goto retry;
	}
}

/*
 * Make the mem_cgroup's charge 0 if there is no task in it.
 * This enables deleting the mem_cgroup.
 */
int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until both lists are empty.
	 */
	while (!(list_empty(&mem->active_list) &&
		 list_empty(&mem->inactive_list))) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		/* drop all page_cgroup in active_list */
		mem_cgroup_force_empty_list(mem, &mem->active_list);
		/* drop all page_cgroup in inactive_list */
		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

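/*
 * res_counter write strategy used for the limit file: parse a
 * human-readable size with memparse() and round it up to a whole
 * number of pages.
 */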
int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}

static ssize_t mem_cgroup_read(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				char __user *userbuf, size_t nbytes,
				loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

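/*
 * Handlers for the control_type file. The write side accepts a decimal
 * value strictly between MEM_CGROUP_TYPE_UNSPEC and MEM_CGROUP_TYPE_MAX;
 * the read side reports the current value.
 */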
static ssize_t mem_control_type_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *pos)
{
	int ret;
	char *buf, *end;
	unsigned long tmp;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = 0;
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;
	tmp = simple_strtoul(buf, &end, 10);
	if (*end != '\0')
		goto out_free;

	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
		goto out_free;

	mem->control_type = tmp;
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}

static ssize_t mem_control_type_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	unsigned long val;
	char buf[64], *s;
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	s = buf;
	val = mem->control_type;
	s += sprintf(s, "%lu\n", val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			ppos, buf, s - buf);
}

static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret;

	ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: this should be removed once cgroups support write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read = mem_cgroup_read,
	},
	{
		.name = "control_type",
		.write = mem_control_type_write,
		.read = mem_control_type_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
		.read = mem_force_empty_read,
	},
};

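/*
 * Create callback for the cgroup subsystem. The root cgroup uses the
 * statically allocated init_mem_cgroup, presumably because early_init
 * subsystems are set up before the slab allocator is available; child
 * cgroups are allocated dynamically.
 */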
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return NULL;

	res_counter_init(&mem->res);
	INIT_LIST_HEAD(&mem->active_list);
	INIT_LIST_HEAD(&mem->inactive_list);
	spin_lock_init(&mem->lru_lock);
	mem->control_type = MEM_CGROUP_TYPE_ALL;
	return &mem->css;
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

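/*
 * Attach callback, invoked when a task moves into this cgroup: retarget
 * the mm's charging to the new cgroup. Only the thread group leader may
 * retarget, since the mm_struct is in effect owned by the leader.
 */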
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct
	 * is in effect owned by the leader.
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 1,
};