/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/zsmalloc.h>

/*
 * Some arbitrary value. This is just to catch an
 * invalid value for the num_devices module parameter.
 */
static const unsigned max_num_devices = 32;

/*-- Configurable parameters */

/*
 * Pages that compress to a size greater than this are stored
 * uncompressed in memory.
 */
static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;

/*
 * NOTE: max_zpage_size must be less than or equal to
 * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
 * always return failure.
 */
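
/*
 * For illustration: with the common 4 KiB PAGE_SIZE, max_zpage_size works
 * out to 3072 bytes, so a page whose compressed form would need more than
 * 3072 bytes is simply kept uncompressed.
 */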

/*-- End of configurable params */

#define SECTOR_SHIFT		9
#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
#define ZRAM_LOGICAL_BLOCK_SIZE	(1 << ZRAM_LOGICAL_BLOCK_SHIFT)
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK	\
	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))

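/*
 * Worked example, assuming the common 4 KiB page size (PAGE_SHIFT == 12):
 * SECTORS_PER_PAGE_SHIFT is 3, so each page covers 8 sectors of 512 bytes,
 * and each 4 KiB zram logical block likewise maps to 8 sectors.
 */
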
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
	/* Page consists entirely of zeros */
	ZRAM_ZERO,

	__NR_ZRAM_PAGEFLAGS,
};

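/*
 * Sketch of intended use (the helpers that test and set these bits live
 * in zram_drv.c, not in this header): each enumerator names a bit
 * position within the u8 'flags' field of struct table below, roughly
 *
 *	meta->table[index].flags & BIT(ZRAM_ZERO)
 *
 * to check whether a slot holds an all-zero page. This also implies that
 * __NR_ZRAM_PAGEFLAGS must not exceed 8.
 */
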
/*-- Data structures */

/* Allocated for each disk page */
struct table {
	unsigned long handle;	/* allocation handle from the zsmalloc pool */
	u16 size;	/* object size (excluding header) */
	u8 count;	/* object ref count (not yet used) */
	u8 flags;
} __aligned(4);

struct zram_stats {
	atomic64_t compr_data_size;	/* compressed size of pages stored */
	atomic64_t num_reads;		/* failed + successful */
	atomic64_t num_writes;		/* --do-- */
	atomic64_t failed_reads;	/* should NEVER! happen */
	atomic64_t failed_writes;	/* can happen when memory is too low */
	atomic64_t invalid_io;		/* non-page-aligned I/O requests */
	atomic64_t notify_free;		/* no. of swap slot free notifications */
	atomic64_t zero_pages;		/* no. of zero filled pages */
	atomic64_t pages_stored;	/* no. of pages currently stored */
};

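/*
 * For reference, a rough overall compression ratio can be read off these
 * counters as (pages_stored << PAGE_SHIFT) / compr_data_size, i.e. the
 * uncompressed bytes currently held versus their compressed size.
 */
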
struct zram_meta {
	rwlock_t tb_lock;	/* protect table */
	void *compress_workmem;	/* scratch memory used by the compressor */
	void *compress_buffer;	/* temporary buffer for a page's compressed data */
	struct table *table;
	struct zs_pool *mem_pool;
	struct mutex buffer_lock; /* protect compress buffers */
};

struct zram {
	struct zram_meta *meta;
	struct request_queue *queue;
	struct gendisk *disk;
	/* Prevent concurrent execution of device init, reset and R/W request */
	struct rw_semaphore init_lock;
	/*
	 * This is the limit on the amount of *uncompressed* data
	 * we can store in a disk.
	 */
	u64 disksize;	/* bytes */

	struct zram_stats stats;
};
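
/*
 * Note: disksize also determines how large zram_meta's table has to be;
 * since one struct table entry is allocated per disk page, that is about
 * disksize >> PAGE_SHIFT entries.
 */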
#endif