/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

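/*
 * Per-cgroup state: one res_counter per supported hugepage size (hstate),
 * so limits and usage are tracked independently for each page size.
 */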
struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};

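/*
 * cft->private of a control file packs two values: the hstate index in
 * the upper 16 bits and the resource attribute (RES_LIMIT, RES_USAGE,
 * RES_MAX_USAGE or RES_FAILCNT) in the lower 16 bits.
 */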
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

struct cgroup_subsys hugetlb_subsys __read_mostly;
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
{
	return hugetlb_cgroup_from_css(cgroup_css(cgroup, hugetlb_subsys_id));
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

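/* Returns true if any hstate still has pages charged to this cgroup. */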
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
			return true;
	}
	return false;
}

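/*
 * Allocate the per-cgroup state. Counters of a non-root cgroup are
 * parented to the corresponding counters one level up, so charges
 * propagate through the hierarchy.
 */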
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test for page activity here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;	/* restart from the first hstate on each pass */
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

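/*
 * Charge nr_pages worth of hugepages of hstates[idx] against the cgroup
 * of the current task. On return, *ptr holds the cgroup that was looked
 * up (NULL when accounting is disabled or the page is too small to be
 * tracked); a zero return means the charge succeeded and should later be
 * committed to a page or uncharged.
 */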
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
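	/*
	 * Retry until we take a reference on the task's current cgroup:
	 * css_tryget() fails while the cgroup is being destroyed, in which
	 * case we re-read the (possibly new) cgroup and try again.
	 */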
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}

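/*
 * Drop a charge taken with hugetlb_cgroup_charge_cgroup(), e.g. when the
 * charged page was never committed to the cgroup.
 */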
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}

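/* Read handler shared by the limit, usage, max_usage and failcnt files. */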
static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
				   struct file *file, char __user *buf,
				   size_t nbytes, loff_t *ppos)
{
	u64 val;
	char str[64];
	int idx, name, len;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	val = res_counter_read_u64(&h_cg->hugepage[idx], name);
	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}

static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
				const char *buffer)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing; reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

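/* Trigger handler that resets the max_usage or failcnt counter. */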
static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

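/* Format a huge page size as a human-readable string, e.g. "2MB" or "1GB". */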
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read = hugetlb_cgroup_read;
	cft->write_string = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read = hugetlb_cgroup_read;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read = hugetlb_cgroup_read;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read = hugetlb_cgroup_read;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));

	return;
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].lru.next for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON(!PageHuge(oldhpage));
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

struct cgroup_subsys hugetlb_subsys = {
	.name = "hugetlb",
	.css_alloc = hugetlb_cgroup_css_alloc,
	.css_offline = hugetlb_cgroup_css_offline,
	.css_free = hugetlb_cgroup_css_free,
	.subsys_id = hugetlb_subsys_id,
};