/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * One counter per hstate, accounting for the hugetlb pages
	 * charged to this cgroup.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
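
/*
 * Each control file packs both the hstate index and the resource
 * attribute into cft->private: the index in the upper 16 bits, the
 * attribute (RES_LIMIT, RES_USAGE, ...) in the lower 16 bits.  For
 * example, MEMFILE_PRIVATE(1, RES_LIMIT) yields (1 << 16) | RES_LIMIT,
 * from which MEMFILE_IDX() recovers 1 and MEMFILE_ATTR() recovers
 * RES_LIMIT.
 */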

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
			return true;
	}
	return false;
}

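/*
 * Called when a new hugetlb cgroup is created (parent_css is NULL only
 * for the root).  Each child counter is parented to the corresponding
 * counter of its parent cgroup, so res_counter charges propagate up the
 * hierarchy and are constrained by every ancestor's limit.
 */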
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or to test whether the page is active here.  This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that are not charged to
	 * any cgroup, i.e. hugepages of fewer than 3 base pages.  We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		/* idx must track the hstate index on every pass */
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget_online(&h_cg->css)) {
		/*
		 * The css we looked up may be going offline; re-read the
		 * task's cgroup and retry until we get a reference.
		 */
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

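/*
 * A sketch of the expected charge/commit sequence (see alloc_huge_page()
 * in mm/hugetlb.c for the real caller):
 *
 *	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 *	if (ret)
 *		return ERR_PTR(-ENOSPC);
 *	spin_lock(&hugetlb_lock);
 *	...dequeue or allocate the huge page...
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 *	spin_unlock(&hugetlb_lock);
 */
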
/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

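/*
 * Uncharge the cgroup directly, without touching a page: used on error
 * paths where a charge taken by hugetlb_cgroup_charge_cgroup() was never
 * committed to a page.
 */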
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	int idx, name;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	buf = strstrip(buf);
	idx = MEMFILE_IDX(of_cft(of)->private);
	name = MEMFILE_ATTR(of_cft(of)->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* res_counter_memparse_write_strategy() does all the parsing */
		ret = res_counter_memparse_write_strategy(buf, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

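/*
 * From userspace the limit is written as a plain byte count or with a
 * K/M/G suffix to the per-size limit file; e.g., assuming 2MB hugepages
 * and the controller mounted at /sys/fs/cgroup/hugetlb:
 *
 *	echo 1G > /sys/fs/cgroup/hugetlb/<group>/hugetlb.2MB.limit_in_bytes
 */
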
static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
				unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* Format a huge page size for the file name, e.g. 2097152 -> "2MB". */
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

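/*
 * Register the control files for one hstate.  For a huge page size of
 * <size> this creates hugetlb.<size>.limit_in_bytes,
 * hugetlb.<size>.usage_in_bytes, hugetlb.<size>.max_usage_in_bytes and
 * hugetlb.<size>.failcnt (the "hugetlb." prefix is added by the cgroup
 * core from the subsystem name).
 */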
static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].lru.next for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock makes sure a parallel cgroup rmdir won't happen
 * while we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};