/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * The counters used to account for huge pages from hugetlb,
	 * one per hstate.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};

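/*
 * A cftype's ->private field packs the hstate index into the upper
 * 16 bits and the RES_* attribute to operate on into the lower 16.
 */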
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
			return true;
	}
	return false;
}

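/*
 * Allocate a new hugetlb cgroup and initialize one res_counter per
 * hstate. A child's counters are parented to the matching counters of
 * @parent_css, so charges propagate up the hierarchy.
 */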
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

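/* Free the per-cgroup state allocated by hugetlb_cgroup_css_alloc(). */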
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we hold hugetlb_lock, pages cannot be moved off the active list
 * or uncharged from the cgroup, so there is no need to take a page
 * reference or to test whether the page is active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list with no cgroup attached,
	 * i.e. huge pages consisting of fewer than 3 pages. We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

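/*
 * Charge the current task's hugetlb cgroup for @nr_pages huge pages of
 * hstate @idx. The css_tryget()/css_put() pair keeps the cgroup alive
 * between the RCU lookup and the res_counter charge. On return *ptr
 * points at the charged cgroup, or is NULL when hugetlb cgroup is
 * disabled or the hstate is too small to be tracked.
 */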
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/*
 * Record @h_cg as the owner of @page, completing a charge taken earlier
 * by hugetlb_cgroup_charge_cgroup(). Should be called with hugetlb_lock
 * held.
 */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Uncharge the cgroup that @page was committed to.
 * Should be called with hugetlb_lock held.
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

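/*
 * Uncharge @h_cg directly, without reference to a page: backs out a
 * charge taken by hugetlb_cgroup_charge_cgroup() that was never
 * committed to a page.
 */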
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
}

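/*
 * Read one 64-bit value (usage, limit, max usage or failcnt, selected
 * by the RES_* attribute in cft->private) for the hstate encoded in
 * cft->private.
 */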
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	int idx, name;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

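/*
 * Write handler for <size>.limit_in_bytes. The value is parsed with
 * res_counter_memparse_write_strategy(), so human-readable suffixes
 * such as "1G" are accepted.
 */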
static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
				struct cftype *cft, const char *buffer)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing; reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

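/*
 * Trigger handler for <size>.max_usage_in_bytes and <size>.failcnt:
 * writing to either file resets the corresponding counter.
 */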
static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
				unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

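/*
 * Format a huge page size as a human-readable string ("2MB", "1GB",
 * ...) for use in the names of the cgroup control files.
 */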
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

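/*
 * Create the four control files (limit_in_bytes, usage_in_bytes,
 * max_usage_in_bytes and failcnt) for one hstate, each named after
 * the huge page size it controls.
 */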
static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write_string = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
}

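/*
 * Register the cgroup control files for every hstate that is large
 * enough for the hugetlb cgroup to track.
 */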
void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].lru.next for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock makes sure that a parallel cgroup rmdir won't happen
 * while we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};