/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _ZS_MALLOC_H_
#define _ZS_MALLOC_H_

#include <linux/types.h>

/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 * They also have no effect when PGTABLE_MAPPING is selected.
 */
enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
	ZS_MM_WO /* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations. Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};

struct zs_pool_stats {
	/* How many pages were migrated (freed) */
	unsigned long pages_compacted;
};

struct zs_pool;

struct zs_pool *zs_create_pool(char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);

unsigned long zs_malloc(struct zs_pool *pool, size_t size);
void zs_free(struct zs_pool *pool, unsigned long obj);
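
/*
 * Illustrative sketch (not part of the original header): a typical
 * allocate/free cycle against this API. The pool name, GFP flags and
 * object size below are arbitrary, and zs_malloc() is assumed to return
 * 0 on failure, so the handle is checked before use.
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	handle = zs_malloc(pool, 128);
 *	if (!handle) {
 *		zs_destroy_pool(pool);
 *		return -ENOMEM;
 *	}
 *
 *	... access the object via zs_map_object()/zs_unmap_object() ...
 *
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */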

void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
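
/*
 * Illustrative sketch (not part of the original header): filling a new
 * allocation through a mapping. ZS_MM_WO skips the copy-in at map time,
 * so it is only safe when the whole object is overwritten; partial
 * updates should map with ZS_MM_RW instead. The buffer names src_buf,
 * dst_buf and the obj_size variable are hypothetical.
 *
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src_buf, obj_size);		(write the whole object)
 *	zs_unmap_object(pool, handle);
 *
 *	(read it back later; ZS_MM_RO skips the copy-out at unmap time)
 *	void *src = zs_map_object(pool, handle, ZS_MM_RO);
 *
 *	memcpy(dst_buf, src, obj_size);
 *	zs_unmap_object(pool, handle);
 */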

unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
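
/*
 * Illustrative sketch (not part of the original header): triggering
 * compaction and reading the running statistics. zs_pool_stats() is
 * assumed to fill the caller-supplied structure; the exact meaning of
 * zs_compact()'s return value is not relied on here.
 *
 *	struct zs_pool_stats stats;
 *
 *	zs_compact(pool);
 *	zs_pool_stats(pool, &stats);
 *	pr_info("pages compacted so far: %lu\n", stats.pages_compacted);
 */
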
#endif