/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,  /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,    /* # of pages charged as rss */

        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * Accounting happens with irqs disabled, so there is no need to bump the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx, int val)
{
        int cpu = smp_processor_id();
        stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 ret = 0;

        for_each_possible_cpu(cpu)
                ret += stat->cpustat[cpu].count[idx];
        return ret;
}

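/*
 * Editor's note (illustrative, not part of the original source): because a
 * page can be charged on one CPU and uncharged on another, an individual
 * cpustat[cpu] entry may go negative; only the sum across all possible
 * CPUs computed above is meaningful. For example, charging a page on CPU0
 * and uncharging it on CPU1 leaves per-cpu counts of +1 and -1, which
 * mem_cgroup_read_stat() correctly folds to 0.
 */
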
/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
        MEM_CGROUP_ZSTAT_ACTIVE,
        MEM_CGROUP_ZSTAT_INACTIVE,

        NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
        /*
         * spin_lock to protect the per cgroup LRU
         */
        spinlock_t              lru_lock;
        struct list_head        active_list;
        struct list_head        inactive_list;
        unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])

struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;

        int     prev_priority;  /* for recording reclaim priority */
        /*
         * statistics.
         */
        struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock. We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin). But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT    0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK        (1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK        0x0
#endif

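/*
 * Editor's note (illustrative, not part of the original source): because
 * struct page_cgroup is at least two-byte aligned, bit 0 of the pointer
 * value is always zero and can double as the lock bit. While locked on
 * SMP, page->page_cgroup holds ((unsigned long)pc | 1); masking with
 * ~PAGE_CGROUP_LOCK, as page_get_page_cgroup() does below, recovers the
 * real pointer whether or not the lock bit is currently set.
 */
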
/*
 * A page_cgroup is associated with every page descriptor. It helps us
 * identify information about the cgroup the page belongs to.
 */
struct page_cgroup {
        struct list_head lru;           /* per cgroup LRU list */
        struct page *page;
        struct mem_cgroup *mem_cgroup;
        atomic_t ref_cnt;               /* Helpful when pages move b/w  */
                                        /* mapped and cached states     */
        int     flags;
};
#define PAGE_CGROUP_FLAG_CACHE  (0x1)   /* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)   /* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
        return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
        return page_zonenum(pc->page);
}

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
                                        bool charge)
{
        int val = (charge) ? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;

        VM_BUG_ON(!irqs_disabled());
        if (flags & PAGE_CGROUP_FLAG_CACHE)
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
        else
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
        struct mem_cgroup *mem = pc->mem_cgroup;
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);

        return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
                                        enum mem_cgroup_zstat_index idx)
{
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
        u64 total = 0;

        for_each_online_node(nid)
                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                        mz = mem_cgroup_zoneinfo(mem, nid, zid);
                        total += MEM_CGROUP_ZSTAT(mz, idx);
                }
        return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
        struct mem_cgroup *mem;

        mem = mem_cgroup_from_task(p);
        css_get(&mem->css);
        mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
        css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
        return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
        VM_BUG_ON(!page_cgroup_locked(page));
        page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
        return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
        bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

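/*
 * Editor's note (illustrative, not part of the original source): the
 * helpers above are used throughout this file in the following pattern,
 * which keeps page->page_cgroup stable while it is inspected or updated:
 *
 *      lock_page_cgroup(page);
 *      pc = page_get_page_cgroup(page);
 *      ... examine or reassign pc ...
 *      unlock_page_cgroup(page);
 */
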
/*
 * Clear page->page_cgroup member under lock_page_cgroup().
 * If the given "pc" value is different from the current page->page_cgroup,
 * page->page_cgroup is not cleared.
 * Returns the value of page->page_cgroup at the time the lock was taken.
 * A caller can detect failure of clearing by checking whether
 * clear_page_cgroup(page, pc) == pc
 */
static struct page_cgroup *clear_page_cgroup(struct page *page,
                                        struct page_cgroup *pc)
{
        struct page_cgroup *ret;
        /* lock and clear */
        lock_page_cgroup(page);
        ret = page_get_page_cgroup(page);
        if (likely(ret == pc))
                page_assign_page_cgroup(page, NULL);
        unlock_page_cgroup(page);
        return ret;
}

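/*
 * Editor's note (illustrative, not part of the original source): callers
 * compare the return value against the pc they passed in:
 *
 *      if (clear_page_cgroup(page, pc) == pc) {
 *              ... we cleared it, so we own the teardown of pc ...
 *      }
 *
 * A mismatch means another path already uncharged or reassigned the page,
 * and the caller must back off (see mem_cgroup_uncharge_page() below).
 */
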
static void __mem_cgroup_remove_list(struct page_cgroup *pc)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
        list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct page_cgroup *pc)
{
        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                list_add(&pc->lru, &mz->inactive_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                list_add(&pc->lru, &mz->active_list);
        }
        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        if (active) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->active_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->inactive_list);
        }
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;

        task_lock(task);
        ret = task->mm && mm_match_cgroup(task->mm, mem);
        task_unlock(task);
        return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        pc = page_get_page_cgroup(page);
        if (!pc)
                return;

        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_move_lists(pc, active);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
        long total, rss;

        /*
         * usage is recorded in bytes. But, here, we assume the number of
         * physical pages can be represented by "long" on any arch.
         */
        total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
        rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        return (int)((rss * 100L) / total);
}

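/*
 * Editor's note (a worked example, not part of the original source): with
 * 4 KiB pages, a cgroup using 1 MiB in total (256 pages, so total = 257
 * after the +1 that avoids division by zero) of which 128 pages are rss
 * returns (128 * 100) / 257 = 49, i.e. roughly half the charged pages
 * are mapped.
 */
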
/*
 * This function is called from vmscan.c. In the page reclaiming loop, the
 * balance between the active and inactive lists is calculated. For memory
 * controller page reclaiming, we should use the mem_cgroup's imbalance
 * rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
        unsigned long active, inactive;
        /* active and inactive are the number of pages. 'long' is ok.*/
        active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
        inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
        return (long) (active / (inactive + 1));
}

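/*
 * Editor's note (a worked example, not part of the original source): the
 * +1 guards against division by zero when the inactive list is empty. A
 * cgroup with 300 active and 99 inactive pages yields 300 / (99 + 1) = 3;
 * a result of 0 means the inactive list already dominates.
 */
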
/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
        return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        if (priority < mem->prev_priority)
                mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
                                   struct zone *zone, int priority)
{
        long nr_active;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
        return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
                                      struct zone *zone, int priority)
{
        long nr_inactive;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
        return (nr_inactive >> priority);
}

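/*
 * Editor's note (a worked example, not part of the original source): the
 * shift scales the scan target by reclaim pressure. With 4096 inactive
 * pages, the first pass at priority == DEF_PRIORITY (12) scans
 * 4096 >> 12 = 1 page; by priority 0 the whole list (4096 pages) is
 * eligible, mirroring how the global reclaim path widens its scan window.
 */
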
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;

        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        if (active)
                src = &mz->active_list;
        else
                src = &mz->inactive_list;

        spin_lock(&mz->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;
                page = pc->page;

                if (unlikely(!PageLRU(page)))
                        continue;

                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        continue;
                }
                if (!PageActive(page) && active) {
                        __mem_cgroup_move_lists(pc, false);
                        continue;
                }

                scan++;
                list_move(&pc->lru, &pc_list);

                if (__isolate_lru_page(page, mode) == 0) {
                        list_move(&page->lru, dst);
                        nr_taken++;
                }
        }

        list_splice(&pc_list, src);
        spin_unlock(&mz->lru_lock);

        *scanned = scan;
        return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype)
{
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;

        /*
         * Should page_cgroup's go to their own slab?
         * One could optimize the performance of the charging routine
         * by saving a bit in the page_flags and using it as a lock
         * to see if the cgroup page already has a page_cgroup associated
         * with it
         */
retry:
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        /*
         * The page_cgroup exists and
         * the page has already been accounted.
         */
        if (pc) {
                if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
                        /* is this page being uncharged ? */
                        unlock_page_cgroup(page);
                        cpu_relax();
                        goto retry;
                } else {
                        unlock_page_cgroup(page);
                        goto done;
                }
        }
        unlock_page_cgroup(page);

        pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
                goto err;

        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
        if (!mm)
                mm = &init_mm;

        rcu_read_lock();
        mem = rcu_dereference(mm->mem_cgroup);
        /*
         * For every charge from the cgroup, increment reference count
         */
        css_get(&mem->css);
        rcu_read_unlock();

        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
                if (!(gfp_mask & __GFP_WAIT))
                        goto out;

                if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;

                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
                 * picture of reclaim. Some pages are reclaimed and might be
                 * moved to swap cache or just unmapped from the cgroup.
                 * Check the limit again to see if the reclaim reduced the
                 * current usage of the cgroup before giving up
                 */
                if (res_counter_check_under_limit(&mem->res))
                        continue;

                if (!nr_retries--) {
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
                congestion_wait(WRITE, HZ/10);
        }

        atomic_set(&pc->ref_cnt, 1);
        pc->mem_cgroup = mem;
        pc->page = page;
        pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags |= PAGE_CGROUP_FLAG_CACHE;

        lock_page_cgroup(page);
        if (page_get_page_cgroup(page)) {
                unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * We take lock_page_cgroup(page) again and read
                 * page->cgroup, increment refcnt.... just retry is OK.
                 */
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
                goto retry;
        }
        page_assign_page_cgroup(page, pc);
        unlock_page_cgroup(page);

        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);

done:
        return 0;
out:
        css_put(&mem->css);
        kfree(pc);
err:
        return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                        MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
{
        if (!mm)
                mm = &init_mm;
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Uncharging is always a welcome operation; we never complain, we simply
 * uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
        struct page_cgroup *pc;
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        /*
         * Check if our page_cgroup is valid
         */
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (!pc)
                goto unlock;

        if (atomic_dec_and_test(&pc->ref_cnt)) {
                page = pc->page;
                mz = page_cgroup_zoneinfo(pc);
                /*
                 * get page->cgroup and clear it under lock.
                 * force_empty can drop page->cgroup without checking refcnt.
                 */
                unlock_page_cgroup(page);
                if (clear_page_cgroup(page, pc) == pc) {
                        mem = pc->mem_cgroup;
                        css_put(&mem->css);
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                        spin_lock_irqsave(&mz->lru_lock, flags);
                        __mem_cgroup_remove_list(pc);
                        spin_unlock_irqrestore(&mz->lru_lock, flags);
                        kfree(pc);
                }
                lock_page_cgroup(page);
        }

unlock:
        unlock_page_cgroup(page);
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup member.
 * Refcnt of page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
        struct page_cgroup *pc;
        int ret = 0;

        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc && atomic_inc_not_zero(&pc->ref_cnt))
                ret = 1;
        unlock_page_cgroup(page);
        return ret;
}

void mem_cgroup_end_migration(struct page *page)
{
        mem_cgroup_uncharge_page(page);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with uncharge() routines because the page_cgroup
 * for *page* has one extra reference taken by mem_cgroup_prepare_migration().
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

retry:
        pc = page_get_page_cgroup(page);
        if (!pc)
                return;

        mz = page_cgroup_zoneinfo(pc);
        if (clear_page_cgroup(page, pc) != pc)
                goto retry;

        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_remove_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);

        pc->page = newpage;
        lock_page_cgroup(newpage);
        page_assign_page_cgroup(newpage, pc);
        unlock_page_cgroup(newpage);

        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * This routine traverses the page_cgroups in a given list and drops them all.
 * This routine ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH    (128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                            struct mem_cgroup_per_zone *mz,
                            int active)
{
        struct page_cgroup *pc;
        struct page *page;
        int count;
        unsigned long flags;
        struct list_head *list;

        if (active)
                list = &mz->active_list;
        else
                list = &mz->inactive_list;

        if (list_empty(list))
                return;
retry:
        count = FORCE_UNCHARGE_BATCH;
        spin_lock_irqsave(&mz->lru_lock, flags);

        while (--count && !list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
                /* Avoid race with charge */
                atomic_set(&pc->ref_cnt, 0);
                if (clear_page_cgroup(page, pc) == pc) {
                        css_put(&mem->css);
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                        __mem_cgroup_remove_list(pc);
                        kfree(pc);
                } else  /* being uncharged ? ...do relax */
                        break;
        }

        spin_unlock_irqrestore(&mz->lru_lock, flags);
        if (!list_empty(list)) {
                cond_resched();
                goto retry;
        }
}

/*
 * Make the mem_cgroup's charge 0 if there is no task attached.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
        int ret = -EBUSY;
        int node, zid;

        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
         * active_list <-> inactive_list while we don't take a lock.
         * So, we have to loop here until all lists are empty.
         */
        while (mem->res.usage > 0) {
                if (atomic_read(&mem->css.cgroup->count) > 0)
                        goto out;
                for_each_node_state(node, N_POSSIBLE)
                        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                                struct mem_cgroup_per_zone *mz;
                                mz = mem_cgroup_zoneinfo(mem, node, zid);
                                /* drop all page_cgroup in active_list */
                                mem_cgroup_force_empty_list(mem, mz, 1);
                                /* drop all page_cgroup in inactive_list */
                                mem_cgroup_force_empty_list(mem, mz, 0);
                        }
        }
        ret = 0;
out:
        css_put(&mem->css);
        return ret;
}

static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
        *tmp = memparse(buf, &buf);
        if (*buf != '\0')
                return -EINVAL;

        /*
         * Round up the value to the closest page size
         */
        *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
        return 0;
}

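/*
 * Editor's note (a worked example, not part of the original source): with
 * PAGE_SIZE of 4096, writing "4000" rounds up to 4096, while "8K" (parsed
 * by memparse() as 8192) is already aligned and stays unchanged, so the
 * stored limit is always a whole number of pages.
 */
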
static ssize_t mem_cgroup_read(struct cgroup *cont,
                        struct cftype *cft, struct file *file,
                        char __user *userbuf, size_t nbytes, loff_t *ppos)
{
        return res_counter_read(&mem_cgroup_from_cont(cont)->res,
                                cft->private, userbuf, nbytes, ppos,
                                NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                struct file *file, const char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        return res_counter_write(&mem_cgroup_from_cont(cont)->res,
                                cft->private, userbuf, nbytes, ppos,
                                mem_cgroup_write_strategy);
}

static ssize_t mem_force_empty_write(struct cgroup *cont,
                                struct cftype *cft, struct file *file,
                                const char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
        int ret = mem_cgroup_force_empty(mem);
        if (!ret)
                ret = nbytes;
        return ret;
}

/*
 * Note: This should be removed if cgroup supports write-only file.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
                                struct cftype *cft,
                                struct file *file, char __user *userbuf,
                                size_t nbytes, loff_t *ppos)
{
        return -EINVAL;
}

static const struct mem_cgroup_stat_desc {
        const char *msg;
        u64 unit;
} mem_cgroup_stat_desc[] = {
        [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
        [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};

static int mem_control_stat_show(struct seq_file *m, void *arg)
{
        struct cgroup *cont = m->private;
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
        struct mem_cgroup_stat *stat = &mem_cont->stat;
        int i;

        for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
                s64 val;

                val = mem_cgroup_read_stat(stat, i);
                val *= mem_cgroup_stat_desc[i].unit;
                seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
                                (long long)val);
        }
        /* showing # of active pages */
        {
                unsigned long active, inactive;

                inactive = mem_cgroup_get_all_zonestat(mem_cont,
                                                MEM_CGROUP_ZSTAT_INACTIVE);
                active = mem_cgroup_get_all_zonestat(mem_cont,
                                                MEM_CGROUP_ZSTAT_ACTIVE);
                seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
                seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
        }
        return 0;
}

static const struct file_operations mem_control_stat_file_operations = {
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int mem_control_stat_open(struct inode *unused, struct file *file)
{
        /* XXX __d_cont */
        struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

        file->f_op = &mem_control_stat_file_operations;
        return single_open(file, mem_control_stat_show, cont);
}

static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = RES_USAGE,
                .read = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = RES_LIMIT,
                .write = mem_cgroup_write,
                .read = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
                .read = mem_cgroup_read,
        },
        {
                .name = "force_empty",
                .write = mem_force_empty_write,
                .read = mem_force_empty_read,
        },
        {
                .name = "stat",
                .open = mem_control_stat_open,
        },
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
        int zone;
        /*
         * This routine is called against possible nodes.
         * But it's BUG to call kmalloc() against offline node.
         *
         * TODO: this routine can waste much memory for nodes which will
         *       never be onlined. It's better to use memory hotplug callback
         *       function.
         */
        if (node_state(node, N_HIGH_MEMORY))
                pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
        else
                pn = kmalloc(sizeof(*pn), GFP_KERNEL);
        if (!pn)
                return 1;

        mem->info.nodeinfo[node] = pn;
        memset(pn, 0, sizeof(*pn));

        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                INIT_LIST_HEAD(&mz->active_list);
                INIT_LIST_HEAD(&mz->inactive_list);
                spin_lock_init(&mz->lru_lock);
        }
        return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        kfree(mem->info.nodeinfo[node]);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct mem_cgroup *mem;
        int node;

        if (unlikely((cont->parent) == NULL)) {
                mem = &init_mem_cgroup;
                init_mm.mem_cgroup = mem;
        } else
                mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

        if (mem == NULL)
                return ERR_PTR(-ENOMEM);

        res_counter_init(&mem->res);

        memset(&mem->info, 0, sizeof(mem->info));

        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;

        return &mem->css;
free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
                kfree(mem);
        return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                        struct cgroup *cont)
{
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
        mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        int node;
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);

        kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p)
{
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;

        mm = get_task_mm(p);
        if (mm == NULL)
                return;

        mem = mem_cgroup_from_cont(cont);
        old_mem = mem_cgroup_from_cont(old_cont);

        if (mem == old_mem)
                goto out;

        /*
         * Only thread group leaders are allowed to migrate, the mm_struct is
         * in effect owned by the leader
         */
        if (p->tgid != p->pid)
                goto out;

        css_get(&mem->css);
        rcu_assign_pointer(mm->mem_cgroup, mem);
        css_put(&old_mem->css);

out:
        mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
        .name = "memory",
        .subsys_id = mem_cgroup_subsys_id,
        .create = mem_cgroup_create,
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
};