/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * Per-hstate counters accounting for the huge pages charged to
	 * this cgroup.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];
};

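/*
 * Each control file encodes, in cftype->private, the hstate index in the
 * upper 16 bits and the resource type in the lower 16 bits, e.g.
 * MEMFILE_PRIVATE(idx, RES_LIMIT) tags the limit file of hstate idx.
 */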
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

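/*
 * Return true if any per-hstate counter of @h_cg still holds charges.
 * css_offline below relies on this to decide when reparenting is done.
 */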
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

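/*
 * Initialise the per-hstate counters of a new cgroup, linking each one
 * to the matching counter of the parent (if any) so that charges
 * propagate up the hierarchy.  The initial limit is the largest
 * huge-page-aligned value a page counter can hold, i.e. effectively
 * unlimited.
 */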
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_limit(counter, limit);
		VM_BUG_ON(ret);
	}
}

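/*
 * Allocate and initialise the hugetlb_cgroup for a new cgroup.  The
 * first allocation (the one without a parent) becomes the root cgroup.
 */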
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test for page activity here.  This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e. huge pages built from fewer than 3 base pages, which are
	 * never charged.  We can safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = 1 << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

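	/*
	 * hugetlb_lock is dropped between hstates and between passes, so
	 * the active lists can change under us; repeat until every
	 * per-hstate counter reads zero.
	 */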
	do {
		/* idx must restart from the first hstate on every pass */
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

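/*
 * Charge @nr_pages base pages of hstate @idx to the hugetlb cgroup of
 * the current task.  On success, returns 0 and stores the charged
 * cgroup in *@ptr for a later hugetlb_cgroup_commit_charge(); returns
 * -ENOMEM if the charge would push a counter past its limit.
 */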
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
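	/*
	 * css_tryget() can fail if the cgroup is concurrently being
	 * freed; re-read the task's cgroup until a live css is pinned.
	 */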
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		ret = -ENOMEM;
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held.
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

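/*
 * Undo a charge taken with hugetlb_cgroup_charge_cgroup() that was
 * never committed to a page, typically because no huge page could be
 * allocated after the charge.  No page (and hence no hugetlb_lock) is
 * involved.
 */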
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

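/* Resource types encoded in the low 16 bits of cftype->private. */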
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

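/*
 * Report one statistic of the counter selected by cft->private.
 * Usage, limit and watermark are kept in pages and reported in bytes;
 * failcnt is a plain event count.
 */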
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

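/*
 * Set a new limit.  The buffer takes a byte count with an optional
 * K/M/G suffix ("-1" removes the limit), which is rounded down to a
 * multiple of the huge page size before being applied; e.g. writing
 * "1G" to hugetlb.2MB.limit_in_bytes allows 512 huge pages.
 */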
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

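/*
 * Writing to a max_usage_in_bytes or failcnt file resets that
 * statistic; the value written is ignored.
 */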
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

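/* Format a huge page size as "<n>GB", "<n>MB" or "<n>KB" for file names. */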
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

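/*
 * Create the four legacy control files for one hstate.  The cgroup
 * core prefixes them with the subsystem name, so for 2MB huge pages
 * they surface as hugetlb.2MB.limit_in_bytes,
 * hugetlb.2MB.usage_in_bytes, hugetlb.2MB.max_usage_in_bytes and
 * hugetlb.2MB.failcnt.
 */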
static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

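/* Callbacks through which the cgroup core drives this controller. */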
struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc = hugetlb_cgroup_css_alloc,
	.css_offline = hugetlb_cgroup_css_offline,
	.css_free = hugetlb_cgroup_css_free,
};