/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * The counter to account for hugepages from hugetlb.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};

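/*
 * The cftype ->private field packs the hstate index into the upper 16 bits
 * and the res_counter member (RES_USAGE, RES_LIMIT, ...) into the lower 16,
 * so one set of read/write/reset handlers can serve every per-hstate file.
 */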
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

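/* the hugetlb cgroup for the root cgroup; the root has no limit set */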
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE) > 0)
			return true;
	}
	return false;
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from
 * the active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test for page activity here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup
	 * attached to them, i.e. hugepages with less than 3 pages. We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);
			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

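/*
 * Charge path: hugetlb_cgroup_charge_cgroup() charges the pages against the
 * current task's cgroup up front; the caller then either binds the cgroup to
 * the allocated page with hugetlb_cgroup_commit_charge() under hugetlb_lock,
 * or cancels the never-committed charge with hugetlb_cgroup_uncharge_cgroup().
 */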
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has less
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held.
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

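/*
 * Cancel a charge taken via hugetlb_cgroup_charge_cgroup() that was never
 * committed to a page.
 */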
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

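/*
 * Read handler shared by the usage, limit, max_usage and failcnt files;
 * cft->private encodes which hstate and which counter attribute to report.
 */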
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	int idx, name;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
				struct cftype *cft, char *buffer)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* res_counter_memparse_write_strategy() does all the parsing; reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
				unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

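/*
 * Register the limit, usage, max_usage and failcnt files for one hstate.
 * The names embed the human-readable page size, so with 2MB huge pages the
 * limit file, for example, shows up as hugetlb.2MB.limit_in_bytes.
 */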
static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write_string = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].lru.next for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to the new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

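/* cgroup core hooks: css lifecycle callbacks for the hugetlb controller */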
struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};