blob: 2219cce81ca485ee5ede686bca82a32c327c28d4 [file] [log] [blame]
Nitin Gupta61989a82012-01-09 16:51:56 -06001/*
2 * zsmalloc memory allocator
3 *
4 * Copyright (C) 2011 Nitin Gupta
Minchan Kim31fc00b2014-01-30 15:45:55 -08005 * Copyright (C) 2012, 2013 Minchan Kim
Nitin Gupta61989a82012-01-09 16:51:56 -06006 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the license that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 */
13
14#ifndef _ZS_MALLOC_H_
15#define _ZS_MALLOC_H_
16
17#include <linux/types.h>
18
/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 * They also have no effect when PGTABLE_MAPPING is selected.
 */
enum zs_mapmode {
	ZS_MM_RW = 0,	/* normal read-write mapping */
	ZS_MM_RO = 1,	/* read-only (no copy-out at unmap time) */
	ZS_MM_WO = 2	/* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations. Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};
36
/* Per-pool statistics, filled in by zs_pool_stats(). */
struct zs_pool_stats {
	/* How many pages were migrated (freed) */
	unsigned long pages_compacted;
};
41
/* Opaque pool type; the definition is private to the zsmalloc implementation. */
struct zs_pool;

/*
 * Create a new pool identified by @name / tear down a pool.
 * NOTE(review): presumably zs_create_pool() returns NULL on failure —
 * confirm against the zsmalloc implementation.
 */
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);

/*
 * Allocate @size bytes from @pool using allocation flags @flags.
 * The returned value is an opaque handle, not a directly dereferenceable
 * pointer: the memory must be accessed through zs_map_object() /
 * zs_unmap_object() below. zs_free() releases an allocation by handle.
 * NOTE(review): presumably zs_malloc() returns 0 on failure — confirm.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);

/*
 * NOTE(review): from the name, the size threshold above which objects are
 * treated as "huge" by @pool — confirm exact semantics in the implementation.
 */
size_t zs_huge_class_size(struct zs_pool *pool);

/*
 * Map the object referred to by @handle into directly addressable memory
 * and return a pointer to it; @mm selects the access mode (see
 * enum zs_mapmode). Every zs_map_object() must be paired with a
 * zs_unmap_object() on the same handle.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);

/* Total number of pages currently backing @pool. */
unsigned long zs_get_total_pages(struct zs_pool *pool);
/*
 * Trigger compaction of @pool.
 * NOTE(review): presumably returns the number of pages freed, matching
 * the pages_compacted statistic above — confirm.
 */
unsigned long zs_compact(struct zs_pool *pool);

/* Copy the pool's statistics into caller-provided @stats. */
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
Nitin Gupta61989a82012-01-09 16:51:56 -060060#endif