#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
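
/*
 * total_usage accumulates the bytes handed out for page_cgroup tables;
 * it is only read for the boot-time "allocated %ld bytes" printouts below.
 */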
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
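
/*
 * Illustrative arithmetic (values made up, not kernel code): on a node
 * whose first pfn is 0x1000, the page at pfn 0x1234 maps to
 * node_page_cgroup[0x234].
 */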

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
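
/*
 * Worked example (figures illustrative; sizeof(struct page_cgroup) is
 * config-dependent): a node spanning 262144 pages (1GiB with 4KiB pages)
 * with a hypothetical 16-byte page_cgroup reserves 4MiB of boot memory
 * here.
 */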

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else	/* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_cgroup)
		return NULL;
#endif
	return section->page_cgroup + pfn;
}
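
/*
 * Illustrative arithmetic (not kernel code): init_section_page_cgroup()
 * below stores "base - section_start_pfn" in section->page_cgroup, which
 * is why adding the raw pfn above lands on the right element.  For a
 * section starting at pfn 0x8000 whose table lives at base:
 *
 *	section->page_cgroup = base - 0x8000;
 *	lookup(pfn 0x8123)   = (base - 0x8000) + 0x8123 = &base[0x123]
 */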

static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}
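
/*
 * Note: the two allocation flavours above are told apart again at free
 * time; free_page_cgroup() below picks vfree() or free_pages_exact()
 * based on is_vmalloc_addr().
 */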

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be section-aligned.  Mask it so that
	 * the stored pointer is biased by the section's first pfn.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}

void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * "nid" == -1 here means the range belongs to a node that
		 * is already online and contains valid memory.  "start_pfn"
		 * is the pfn that was passed to online_pages(), so it must
		 * exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}
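
/*
 * Worked example (PAGES_PER_SECTION is arch-dependent; 0x8000 assumed):
 * onlining nr_pages = 0x4000 at start_pfn = 0xc000 gives
 *
 *	start = SECTION_ALIGN_DOWN(0xc000)         = 0x8000
 *	end   = SECTION_ALIGN_UP(0xc000 + 0x4000)  = 0x10000
 *
 * so exactly one section's page_cgroup table is initialized.
 */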

int __meminit offline_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and
		 * page->flags of pages outside the node are not initialized.
		 * So we scan [start_pfn, end_pfn) one section at a time and
		 * skip pfns that do not belong to this node.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap.  Some architectures
			 * lay nodes out interleaved, e.g.:
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
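
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096 and a two-byte
 * struct swap_cgroup, SC_PER_PAGE == 2048, i.e. each page of ctrl->map[]
 * records the owning mem_cgroup id for 2048 swap entries.
 */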

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache.  At swap_free(), it is accessed directly from swap.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via SwapCache, because
 *    the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no other user of the entry and
 *    hence no race.
 * So no lock is needed around "exchange".
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}
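
/*
 * Illustrative arithmetic (not kernel code): with SC_PER_PAGE == 2048,
 * the entry at swap offset 5000 lives in the third map page:
 *
 *	mappage = ctrl->map[5000 / 2048]   ->  map[2]
 *	slot    = 5000 % 2048              ->  sc[904] within that page
 */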

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}
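
/*
 * Usage sketch (illustrative, not a quote from memcontrol.c): moving a
 * swapped-out charge between groups only if the entry still belongs to
 * the old one:
 *
 *	if (swap_cgroup_cmpxchg(ent, old_id, new_id) == old_id)
 *		...adjust the two groups' swap counters...
 */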

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's id to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}
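
/*
 * Usage sketch (illustrative): a swap-out path records the owner's id,
 * and the final swap_free() clears it again:
 *
 *	swap_cgroup_record(ent, css_id(&memcg->css));	at swap-out
 *	...
 *	swap_cgroup_record(ent, 0);			at swap_free()
 */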

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}
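
/*
 * Worked example (illustrative): a 4GiB swap device with 4KiB pages has
 * max_pages == 1048576; with SC_PER_PAGE == 2048 that is length == 512
 * map pages (2MiB of id storage) plus a 512-pointer array (4KiB on
 * 64-bit).
 */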

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}

#endif