// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions. We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
	}

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
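
/*
 * A worked example of the mapping above (illustrative only, not part
 * of the original header): a transaction-safe allocation that is
 * allowed to fail would pass KM_NOFS | KM_MAYFAIL:
 *
 *	gfp_t gfp = kmem_flags_convert(KM_NOFS | KM_MAYFAIL);
 *
 * which yields (GFP_KERNEL & ~__GFP_FS) | __GFP_NOWARN |
 * __GFP_RETRY_MAYFAIL, i.e. GFP_NOFS | __GFP_NOWARN |
 * __GFP_RETRY_MAYFAIL.
 */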

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
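/*
 * Note that kmem_free() below releases memory through kvfree(), so it
 * is safe for allocations from kmem_alloc() (kmalloc-backed) as well
 * as from kmem_alloc_large(), which may fall back to vmalloc for
 * sizes the slab allocator cannot satisfy.
 */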
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}
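
/*
 * Typical caller pattern (a sketch only; struct xfs_foo is a
 * hypothetical type used for illustration):
 *
 *	struct xfs_foo	*foo;
 *
 *	foo = kmem_zalloc(sizeof(*foo), KM_SLEEP | KM_MAYFAIL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	kmem_free(foo);
 *
 * Historically a plain KM_SLEEP allocation retries inside kmem.c
 * until it succeeds, so the NULL check only matters for KM_MAYFAIL
 * and KM_NOSLEEP callers.
 */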

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}
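
/*
 * Example zone setup (a sketch; xfs_foo_zone and struct xfs_foo are
 * hypothetical names used for illustration):
 *
 *	kmem_zone_t	*xfs_foo_zone;
 *
 *	xfs_foo_zone = kmem_zone_init_flags(sizeof(struct xfs_foo),
 *			"xfs_foo", KM_ZONE_HWALIGN | KM_ZONE_ACCOUNT,
 *			NULL);
 *	if (!xfs_foo_zone)
 *		return -ENOMEM;
 */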

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
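
/*
 * Object lifecycle against such a zone (sketch, continuing the
 * hypothetical xfs_foo_zone example above):
 *
 *	struct xfs_foo *foo = kmem_zone_zalloc(xfs_foo_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(xfs_foo_zone, foo);
 *
 * and once every object has been freed, at teardown:
 *
 *	kmem_zone_destroy(xfs_foo_zone);
 */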

#endif	/* __XFS_SUPPORT_KMEM_H__ */