/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

#define KM_SLEEP	0x0001u
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(unsigned int __nocast flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;
	}
	return lflags;
}

extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
extern void  kmem_free(const void *);

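/*
 * Example (illustrative sketch): the allocators above take a KM_* mask
 * which kmem_flags_convert() turns into gfp flags -- KM_NOSLEEP yields
 * GFP_ATOMIC | __GFP_NOWARN, everything else starts from
 * GFP_KERNEL | __GFP_NOWARN, and KM_NOFS (or a caller running with
 * PF_FSTRANS set) clears __GFP_FS so we don't recurse into the
 * filesystem.  A caller might look like this, with xfs_foo_t standing
 * in as a hypothetical structure type:
 *
 *	xfs_foo_t	*foo;
 *
 *	foo = kmem_zalloc(sizeof(xfs_foo_t), KM_SLEEP | KM_MAYFAIL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	kmem_free(foo);
 */
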
static inline void *kmem_zalloc_large(size_t size)
{
	void	*ptr;

	ptr = vmalloc(size);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
static inline void kmem_free_large(void *ptr)
{
	vfree(ptr);
}

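/*
 * Example (illustrative sketch): kmem_zalloc_large() is backed by
 * vmalloc(), so the memory must be released with kmem_free_large()
 * rather than kmem_free().  A hypothetical oversized, zeroed buffer:
 *
 *	void	*buf;
 *
 *	buf = kmem_zalloc_large(nrecords * record_size);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kmem_free_large(buf);
 */
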
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	if (zone)
		kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);

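/*
 * Example (illustrative sketch): a zone wraps a slab cache, so the
 * usual lifecycle is init once, allocate and free many times, then
 * destroy on teardown.  xfs_foo_t and xfs_foo_zone are hypothetical
 * names used only for illustration:
 *
 *	kmem_zone_t	*xfs_foo_zone;
 *
 *	xfs_foo_zone = kmem_zone_init(sizeof(xfs_foo_t), "xfs_foo");
 *	...
 *	foo = kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(xfs_foo_zone, foo);
 *	...
 *	kmem_zone_destroy(xfs_foo_zone);
 */
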
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
	return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS));
}

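/*
 * Example (illustrative sketch): kmem_shake_allow() gates reclaim
 * ("shake") callbacks, which should only do real work when the
 * caller's gfp mask permits both blocking and filesystem recursion:
 *
 *	if (!kmem_shake_allow(gfp_mask))
 *		return 0;
 *	... do the actual shrinking ...
 */
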
#endif /* __XFS_SUPPORT_KMEM_H__ */