/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

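/*
 * ZRAM_ATTR_RO(foo) expands to a show-only sysfs attribute, dev_attr_foo,
 * that prints the 64-bit counter zram->stats.foo in decimal. All of the
 * read-only statistics exported near the bottom of this file are generated
 * from this one template.
 */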
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
                struct device_attribute *attr, char *b)		\
{									\
        struct zram *zram = dev_to_zram(d);				\
        return sprintf(b, "%llu\n",					\
                (u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
        __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
        return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return sprintf(buf, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (init_done(zram))
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return sprintf(buf, "%llu\n", val);
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return sprintf(buf, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);

        if (kstrtoint(buf, 0, &num))
                return -EINVAL;
        if (num < 1)
                return -EINVAL;
        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (zcomp_set_max_streams(zram->comp, num))
                        pr_info("Cannot change max compression streams\n");
        }
        zram->max_comp_streams = num;
        up_write(&zram->init_lock);
        return len;
}

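/*
 * Example (assuming the first device, zram0): allow up to four
 * concurrent compression streams with
 *	echo 4 > /sys/block/zram0/max_comp_streams
 */
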
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
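/*
 * Worked example, assuming the usual 4096-byte logical block
 * (ZRAM_LOGICAL_BLOCK_SIZE in zram_drv.h) and 512-byte sectors, so
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK is 8: both constants are powers of two,
 * so the "x & (n - 1)" masks below are equivalent to "x % n" and reject
 * any bi_sector not a multiple of 8 or bi_size not a multiple of 4096.
 */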
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_meta;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

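        /*
         * tb_lock guards the table[] entries (handle, size and flags):
         * the I/O paths take it for read, while freeing or overwriting
         * an entry takes it for write.
         */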
        rwlock_init(&meta->tb_lock);
        return meta;

free_table:
        vfree(meta->table);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

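/*
 * Example (assuming 4K pages): a bvec of 1024 bytes at offset 3072
 * reaches the end of the page, so the index advances and the offset
 * wraps to (3072 + 1024) % 4096 = 0.
 */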
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

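/*
 * Decompress the page at @index into @mem. Zero-filled and unallocated
 * slots are satisfied by clearing the destination; pages stored
 * uncompressed (size == PAGE_SIZE) are copied straight out of the
 * zsmalloc object, everything else goes through the compression backend.
 */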
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        u16 size;

        read_lock(&meta->tb_lock);
        handle = meta->table[index].handle;
        size = meta->table[index].size;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        read_unlock(&meta->tb_lock);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        read_lock(&meta->tb_lock);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                handle_zero_page(bvec);
                return 0;
        }
        read_unlock(&meta->tb_lock);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

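/*
 * Write path, in order: for a partial bio the old page is decompressed
 * and patched first; zero-filled pages are recorded with ZRAM_ZERO and
 * no allocation; everything else is compressed, stored uncompressed if
 * the result exceeds max_zpage_size, copied into a zsmalloc object and
 * published in the table under tb_lock.
 */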
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
        bool locked = false;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial I/O. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        locked = true;
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                                bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                write_lock(&zram->meta->tb_lock);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                write_unlock(&zram->meta->tb_lock);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        locked = false;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        write_lock(&zram->meta->tb_lock);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        meta->table[index].size = clen;
        write_unlock(&zram->meta->tb_lock);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (locked)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio)
{
        int ret;
        int rw = bio_data_dir(bio);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        return ret;
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);
        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zcomp_destroy(zram->comp);
        zram->max_comp_streams = 1;

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);
        up_write(&zram->init_lock);
}

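/*
 * The disksize value is parsed with memparse(), which understands
 * suffixes such as K, M and G, e.g. (assuming the first device, zram0):
 *	echo 512M > /sys/block/zram0/disksize
 */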
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_free_meta;
        }

        zram->comp = zcomp_create(default_compressor, zram->max_comp_streams);
        if (!zram->comp) {
                pr_info("Cannot initialise %s compressing backend\n",
                        default_compressor);
                err = -EINVAL;
                goto out_free_meta;
        }

        zram->meta = meta;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        return len;

out_free_meta:
        up_write(&zram->init_lock);
        zram_meta_free(meta);
        return err;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram)))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

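/*
 * Called through the block device's swap_slot_free_notify hook when the
 * swap subsystem frees a swap slot backed by this device, so the
 * compressed copy can be dropped immediately instead of lingering until
 * the slot is overwritten.
 */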
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        write_lock(&meta->tb_lock);
        zram_free_page(zram, index);
        write_unlock(&meta->tb_lock);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
                max_comp_streams_show, max_comp_streams_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_max_comp_streams.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

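/*
 * Typical setup from userspace (device name zram0 assumed):
 *	modprobe zram num_devices=4
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 * and, when done:
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */
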
static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }

        zram->meta = NULL;
        zram->max_comp_streams = 1;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                        num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");