#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>

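/* Initialize one page_cgroup descriptor for the page at @pfn. */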
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

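/*
 * Look up the page_cgroup for @page in the per-node flat array
 * (FLATMEM case). Returns NULL if the node's table was never allocated.
 */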
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

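/*
 * Allocate and initialize the node's page_cgroup table from bootmem,
 * covering every pfn the node spans.
 */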
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

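/* Boot-time setup for the FLATMEM case: allocate tables for all online nodes. */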
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

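/*
 * Look up the page_cgroup for @page via its mem_section (SPARSEMEM case).
 * section->page_cgroup stores a pfn-biased pointer, so indexing by the
 * raw pfn is correct here.
 */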
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

/* Runs at boot and at memory hotplug; slab must be available by then. */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
					    GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we must
		 * reinitialize.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
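/*
 * Free the page_cgroup table covering @pfn's section, if one was
 * allocated. Callers pass section-aligned pfns, so @base points at the
 * start of the table.
 */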
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* Is bootmem? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}

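/*
 * Allocate page_cgroup tables for every section in the range going
 * online. On failure, roll back any tables allocated so far.
 */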
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

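/* Free the page_cgroup tables for every section in the range going offline. */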
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

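/*
 * Memory-hotplug notifier: allocate or free section tables as memory
 * comes and goes.
 */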
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;

	return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

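/*
 * Boot-time setup for the SPARSEMEM case: walk all present sections
 * and allocate their page_cgroup tables.
 */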
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
	       " want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

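/* One swap_cgroup entry per swap slot, holding the owning mem_cgroup's css ID. */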
struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap.
 *
 * This means:
 * - there is no race in "exchange" when accessed via SwapCache, because
 *   the SwapCache (and its swp_entry) is under lock.
 * - when called via swap_free(), there is no other user of the entry and
 *   hence no race.
 * Updates of sc->id are nevertheless serialized by ctrl->lock.
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffers for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (There is no mem_cgroup using 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's css ID to be recorded
 *
 * Returns the old id on success, 0 on failure.
 * (Of course, the old value can itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

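/*
 * Set up the swap_cgroup map for swap device @type at swapon time,
 * sized to cover @max_pages swap slots.
 */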
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = ((max_pages/SC_PER_PAGE) + 1);
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		vfree(array);
		mutex_unlock(&swap_cgroup_mutex);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option\n");
	return -ENOMEM;
}

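/* Tear down the swap_cgroup map for swap device @type at swapoff time. */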
void swap_cgroup_swapoff(int type)
{
	int i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */