Nitin Gupta306b0c92009-09-22 10:26:53 +05301/*
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302 * Compressed RAM block device
Nitin Gupta306b0c92009-09-22 10:26:53 +05303 *
Nitin Gupta1130ebb2010-01-28 21:21:35 +05304 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
Minchan Kim7bfb3de2014-01-30 15:45:55 -08005 * 2012, 2013 Minchan Kim
Nitin Gupta306b0c92009-09-22 10:26:53 +05306 *
7 * This code is released using a dual license strategy: BSD/GPL
 8 * You can choose the license that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
Nitin Gupta306b0c92009-09-22 10:26:53 +053013 */
14
Nitin Guptaf1e3cff2010-06-01 13:31:25 +053015#define KMSG_COMPONENT "zram"
Nitin Gupta306b0c92009-09-22 10:26:53 +053016#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
Robert Jenningsb1f5b812011-01-28 08:59:26 -060018#ifdef CONFIG_ZRAM_DEBUG
19#define DEBUG
20#endif
21
Nitin Gupta306b0c92009-09-22 10:26:53 +053022#include <linux/module.h>
23#include <linux/kernel.h>
Randy Dunlap8946a082010-06-23 20:27:09 -070024#include <linux/bio.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053025#include <linux/bitops.h>
26#include <linux/blkdev.h>
27#include <linux/buffer_head.h>
28#include <linux/device.h>
29#include <linux/genhd.h>
30#include <linux/highmem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053032#include <linux/lzo.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053033#include <linux/string.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053034#include <linux/vmalloc.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053035
Nitin Gupta16a4bfb2010-06-01 13:31:24 +053036#include "zram_drv.h"
Nitin Gupta306b0c92009-09-22 10:26:53 +053037
38/* Globals */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +053039static int zram_major;
Jiang Liu0f0e3ba2013-06-07 00:07:29 +080040static struct zram *zram_devices;
Nitin Gupta306b0c92009-09-22 10:26:53 +053041
Nitin Gupta306b0c92009-09-22 10:26:53 +053042/* Module params (documentation at end) */
Davidlohr Buesoca3d70b2013-01-01 21:24:13 -080043static unsigned int num_devices = 1;
Nitin Gupta33863c22010-08-09 22:56:47 +053044
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -070045static inline int init_done(struct zram *zram)
46{
47 return zram->meta != NULL;
48}
49
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030050static inline struct zram *dev_to_zram(struct device *dev)
51{
52 return (struct zram *)dev_to_disk(dev)->private_data;
53}
54
55static ssize_t disksize_show(struct device *dev,
56 struct device_attribute *attr, char *buf)
57{
58 struct zram *zram = dev_to_zram(dev);
59
60 return sprintf(buf, "%llu\n", zram->disksize);
61}
62
63static ssize_t initstate_show(struct device *dev,
64 struct device_attribute *attr, char *buf)
65{
66 struct zram *zram = dev_to_zram(dev);
67
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -070068 return sprintf(buf, "%u\n", init_done(zram));
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030069}
70
71static ssize_t num_reads_show(struct device *dev,
72 struct device_attribute *attr, char *buf)
73{
74 struct zram *zram = dev_to_zram(dev);
75
76 return sprintf(buf, "%llu\n",
77 (u64)atomic64_read(&zram->stats.num_reads));
78}
79
80static ssize_t num_writes_show(struct device *dev,
81 struct device_attribute *attr, char *buf)
82{
83 struct zram *zram = dev_to_zram(dev);
84
85 return sprintf(buf, "%llu\n",
86 (u64)atomic64_read(&zram->stats.num_writes));
87}
88
89static ssize_t invalid_io_show(struct device *dev,
90 struct device_attribute *attr, char *buf)
91{
92 struct zram *zram = dev_to_zram(dev);
93
94 return sprintf(buf, "%llu\n",
95 (u64)atomic64_read(&zram->stats.invalid_io));
96}
97
98static ssize_t notify_free_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct zram *zram = dev_to_zram(dev);
102
103 return sprintf(buf, "%llu\n",
104 (u64)atomic64_read(&zram->stats.notify_free));
105}
106
107static ssize_t zero_pages_show(struct device *dev,
108 struct device_attribute *attr, char *buf)
109{
110 struct zram *zram = dev_to_zram(dev);
111
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800112 return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300113}
114
115static ssize_t orig_data_size_show(struct device *dev,
116 struct device_attribute *attr, char *buf)
117{
118 struct zram *zram = dev_to_zram(dev);
119
120 return sprintf(buf, "%llu\n",
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800121 (u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300122}
123
124static ssize_t compr_data_size_show(struct device *dev,
125 struct device_attribute *attr, char *buf)
126{
127 struct zram *zram = dev_to_zram(dev);
128
129 return sprintf(buf, "%llu\n",
130 (u64)atomic64_read(&zram->stats.compr_size));
131}
132
133static ssize_t mem_used_total_show(struct device *dev,
134 struct device_attribute *attr, char *buf)
135{
136 u64 val = 0;
137 struct zram *zram = dev_to_zram(dev);
138 struct zram_meta *meta = zram->meta;
139
140 down_read(&zram->init_lock);
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700141 if (init_done(zram))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300142 val = zs_get_total_size_bytes(meta->mem_pool);
143 up_read(&zram->init_lock);
144
145 return sprintf(buf, "%llu\n", val);
146}
147
Minchan Kim92967472014-01-30 15:46:03 -0800148/* flag operations need meta->tb_lock */
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900149static int zram_test_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530150 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530151{
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900152 return meta->table[index].flags & BIT(flag);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530153}
154
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900155static void zram_set_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530156 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530157{
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900158 meta->table[index].flags |= BIT(flag);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530159}
160
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900161static void zram_clear_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530162 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530163{
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900164 meta->table[index].flags &= ~BIT(flag);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530165}
166
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300167static inline int is_partial_io(struct bio_vec *bvec)
168{
169 return bvec->bv_len != PAGE_SIZE;
170}
171
172/*
173 * Check if request is within bounds and aligned on zram logical blocks.
174 */
175static inline int valid_io_request(struct zram *zram, struct bio *bio)
176{
177 u64 start, end, bound;
Kumar Gaurava539c722013-08-08 23:53:24 +0530178
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300179 /* unaligned request */
Kent Overstreet4f024f32013-10-11 15:44:27 -0700180 if (unlikely(bio->bi_iter.bi_sector &
181 (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300182 return 0;
Kent Overstreet4f024f32013-10-11 15:44:27 -0700183 if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300184 return 0;
185
Kent Overstreet4f024f32013-10-11 15:44:27 -0700186 start = bio->bi_iter.bi_sector;
187 end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300188 bound = zram->disksize >> SECTOR_SHIFT;
 189 /* out of range */
Sergey Senozhatsky75c7caf2013-06-22 17:21:00 +0300190 if (unlikely(start >= bound || end > bound || start > end))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300191 return 0;
192
193 /* I/O request is valid */
194 return 1;
195}
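/*
 * Worked example (assuming the usual 4 KiB zram logical block and
 * 512-byte sectors, i.e. ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio
 * starting at sector 11 is rejected because 11 & (8 - 1) == 3, while a
 * bio at sector 16 with bi_size == 8192 spans sectors [16, 32) and is
 * accepted as long as 32 does not exceed disksize >> SECTOR_SHIFT.
 */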
196
197static void zram_meta_free(struct zram_meta *meta)
198{
199 zs_destroy_pool(meta->mem_pool);
200 kfree(meta->compress_workmem);
201 free_pages((unsigned long)meta->compress_buffer, 1);
202 vfree(meta->table);
203 kfree(meta);
204}
205
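/*
 * Allocate per-device metadata: the LZO scratch area, a two-page
 * compression buffer, the per-page handle/size table and a zsmalloc
 * pool.  Returns NULL if any allocation fails.
 */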
206static struct zram_meta *zram_meta_alloc(u64 disksize)
207{
208 size_t num_pages;
209 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
210 if (!meta)
211 goto out;
212
213 meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
214 if (!meta->compress_workmem)
215 goto free_meta;
216
217 meta->compress_buffer =
218 (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
219 if (!meta->compress_buffer) {
220 pr_err("Error allocating compressor buffer space\n");
221 goto free_workmem;
222 }
223
224 num_pages = disksize >> PAGE_SHIFT;
225 meta->table = vzalloc(num_pages * sizeof(*meta->table));
226 if (!meta->table) {
227 pr_err("Error allocating zram address table\n");
228 goto free_buffer;
229 }
230
231 meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
232 if (!meta->mem_pool) {
233 pr_err("Error creating memory pool\n");
234 goto free_table;
235 }
236
Minchan Kim92967472014-01-30 15:46:03 -0800237 rwlock_init(&meta->tb_lock);
Minchan Kime46e3312014-01-30 15:46:06 -0800238 mutex_init(&meta->buffer_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300239 return meta;
240
241free_table:
242 vfree(meta->table);
243free_buffer:
244 free_pages((unsigned long)meta->compress_buffer, 1);
245free_workmem:
246 kfree(meta->compress_workmem);
247free_meta:
248 kfree(meta);
249 meta = NULL;
250out:
251 return meta;
252}
253
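/*
 * Advance (index, offset) past the bytes covered by @bvec.  With 4 KiB
 * pages, for example, offset 3584 plus bv_len 1024 crosses a page
 * boundary and becomes (index + 1, 512).
 */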
254static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
255{
256 if (*offset + bvec->bv_len >= PAGE_SIZE)
257 (*index)++;
258 *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
259}
260
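/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */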
Nitin Gupta306b0c92009-09-22 10:26:53 +0530261static int page_zero_filled(void *ptr)
262{
263 unsigned int pos;
264 unsigned long *page;
265
266 page = (unsigned long *)ptr;
267
268 for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
269 if (page[pos])
270 return 0;
271 }
272
273 return 1;
274}
275
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300276static void handle_zero_page(struct bio_vec *bvec)
277{
278 struct page *page = bvec->bv_page;
279 void *user_mem;
280
281 user_mem = kmap_atomic(page);
282 if (is_partial_io(bvec))
283 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
284 else
285 clear_page(user_mem);
286 kunmap_atomic(user_mem);
287
288 flush_dcache_page(page);
289}
290
Minchan Kim92967472014-01-30 15:46:03 -0800291/* NOTE: caller should hold meta->tb_lock with write-side */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530292static void zram_free_page(struct zram *zram, size_t index)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530293{
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900294 struct zram_meta *meta = zram->meta;
295 unsigned long handle = meta->table[index].handle;
296 u16 size = meta->table[index].size;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530297
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600298 if (unlikely(!handle)) {
Nitin Gupta2e882282010-01-28 21:13:41 +0530299 /*
300 * No memory is allocated for zero filled pages.
301 * Simply clear zero page flag.
302 */
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900303 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
304 zram_clear_flag(meta, index, ZRAM_ZERO);
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800305 atomic_dec(&zram->stats.pages_zero);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530306 }
307 return;
308 }
309
Minchan Kim130f3152012-06-08 15:39:27 +0900310 if (unlikely(size > max_zpage_size))
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800311 atomic_dec(&zram->stats.bad_compress);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530312
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900313 zs_free(meta->mem_pool, handle);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530314
Minchan Kim130f3152012-06-08 15:39:27 +0900315 if (size <= PAGE_SIZE / 2)
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800316 atomic_dec(&zram->stats.good_compress);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530317
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800318 atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800319 atomic_dec(&zram->stats.pages_stored);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530320
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900321 meta->table[index].handle = 0;
322 meta->table[index].size = 0;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530323}
324
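/*
 * Decompress the page stored at @index into @mem.  Zero pages and
 * never-written slots are simply cleared; pages kept uncompressed
 * (size == PAGE_SIZE) are copied as-is.  The table entry is read under
 * meta->tb_lock.
 */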
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300325static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530326{
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300327 int ret = LZO_E_OK;
Jerome Marchand924bd882011-06-10 15:28:48 +0200328 size_t clen = PAGE_SIZE;
Jerome Marchand924bd882011-06-10 15:28:48 +0200329 unsigned char *cmem;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900330 struct zram_meta *meta = zram->meta;
Minchan Kim92967472014-01-30 15:46:03 -0800331 unsigned long handle;
332 u16 size;
333
334 read_lock(&meta->tb_lock);
335 handle = meta->table[index].handle;
336 size = meta->table[index].size;
Jerome Marchand924bd882011-06-10 15:28:48 +0200337
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900338 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
Minchan Kim92967472014-01-30 15:46:03 -0800339 read_unlock(&meta->tb_lock);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800340 clear_page(mem);
Jerome Marchand924bd882011-06-10 15:28:48 +0200341 return 0;
342 }
343
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900344 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
Minchan Kim92967472014-01-30 15:46:03 -0800345 if (size == PAGE_SIZE)
Jiang Liu42e99bd2013-06-07 00:07:30 +0800346 copy_page(mem, cmem);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300347 else
Minchan Kim92967472014-01-30 15:46:03 -0800348 ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900349 zs_unmap_object(meta->mem_pool, handle);
Minchan Kim92967472014-01-30 15:46:03 -0800350 read_unlock(&meta->tb_lock);
Jerome Marchand924bd882011-06-10 15:28:48 +0200351
352 /* Should NEVER happen. Return bio error if it does. */
353 if (unlikely(ret != LZO_E_OK)) {
354 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800355 atomic64_inc(&zram->stats.failed_reads);
Jerome Marchand924bd882011-06-10 15:28:48 +0200356 return ret;
357 }
358
359 return 0;
360}
361
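/*
 * Read the page at @index into @bvec.  Partial reads decompress into a
 * temporary buffer and copy only the requested byte range; full-page
 * reads decompress straight into the caller's mapped page.
 */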
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300362static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
363 u32 index, int offset, struct bio *bio)
364{
365 int ret;
366 struct page *page;
367 unsigned char *user_mem, *uncmem = NULL;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900368 struct zram_meta *meta = zram->meta;
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300369 page = bvec->bv_page;
370
Minchan Kim92967472014-01-30 15:46:03 -0800371 read_lock(&meta->tb_lock);
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900372 if (unlikely(!meta->table[index].handle) ||
373 zram_test_flag(meta, index, ZRAM_ZERO)) {
Minchan Kim92967472014-01-30 15:46:03 -0800374 read_unlock(&meta->tb_lock);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300375 handle_zero_page(bvec);
376 return 0;
377 }
Minchan Kim92967472014-01-30 15:46:03 -0800378 read_unlock(&meta->tb_lock);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300379
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300380 if (is_partial_io(bvec))
381 /* Use a temporary buffer to decompress the page */
Minchan Kim7e5a5102013-01-30 11:41:39 +0900382 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
383
384 user_mem = kmap_atomic(page);
385 if (!is_partial_io(bvec))
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300386 uncmem = user_mem;
387
388 if (!uncmem) {
389 pr_info("Unable to allocate temp memory\n");
390 ret = -ENOMEM;
391 goto out_cleanup;
392 }
393
394 ret = zram_decompress_page(zram, uncmem, index);
395 /* Should NEVER happen. Return bio error if it does. */
Wanpeng Li25eeb662013-03-13 15:06:16 +0800396 if (unlikely(ret != LZO_E_OK))
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300397 goto out_cleanup;
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300398
399 if (is_partial_io(bvec))
400 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
401 bvec->bv_len);
402
403 flush_dcache_page(page);
404 ret = 0;
405out_cleanup:
406 kunmap_atomic(user_mem);
407 if (is_partial_io(bvec))
408 kfree(uncmem);
409 return ret;
410}
411
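/*
 * Write @bvec at (@index, @offset).  Partial writes first decompress
 * the old page so untouched bytes are preserved.  Zero-filled pages are
 * recorded with just the ZRAM_ZERO flag; everything else is
 * LZO-compressed (or kept uncompressed when it compresses poorly),
 * copied into a zsmalloc object and published in the table under
 * meta->tb_lock.
 */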
Jerome Marchand924bd882011-06-10 15:28:48 +0200412static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
413 int offset)
414{
Nitin Gupta397c6062013-01-02 08:53:41 -0800415 int ret = 0;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200416 size_t clen;
Minchan Kimc2344342012-06-08 15:39:25 +0900417 unsigned long handle;
Minchan Kim130f3152012-06-08 15:39:27 +0900418 struct page *page;
Jerome Marchand924bd882011-06-10 15:28:48 +0200419 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900420 struct zram_meta *meta = zram->meta;
Minchan Kime46e3312014-01-30 15:46:06 -0800421 bool locked = false;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200422
423 page = bvec->bv_page;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900424 src = meta->compress_buffer;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200425
Jerome Marchand924bd882011-06-10 15:28:48 +0200426 if (is_partial_io(bvec)) {
427 /*
428 * This is a partial IO. We need to read the full page
429 * before to write the changes.
430 */
Minchan Kim7e5a5102013-01-30 11:41:39 +0900431 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
Jerome Marchand924bd882011-06-10 15:28:48 +0200432 if (!uncmem) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200433 ret = -ENOMEM;
434 goto out;
435 }
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300436 ret = zram_decompress_page(zram, uncmem, index);
Nitin Gupta397c6062013-01-02 08:53:41 -0800437 if (ret)
Jerome Marchand924bd882011-06-10 15:28:48 +0200438 goto out;
Jerome Marchand924bd882011-06-10 15:28:48 +0200439 }
440
Minchan Kime46e3312014-01-30 15:46:06 -0800441 mutex_lock(&meta->buffer_lock);
442 locked = true;
Cong Wangba82fe22011-11-25 23:14:25 +0800443 user_mem = kmap_atomic(page);
Jerome Marchand924bd882011-06-10 15:28:48 +0200444
Nitin Gupta397c6062013-01-02 08:53:41 -0800445 if (is_partial_io(bvec)) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200446 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
447 bvec->bv_len);
Nitin Gupta397c6062013-01-02 08:53:41 -0800448 kunmap_atomic(user_mem);
449 user_mem = NULL;
450 } else {
Jerome Marchand924bd882011-06-10 15:28:48 +0200451 uncmem = user_mem;
Nitin Gupta397c6062013-01-02 08:53:41 -0800452 }
Jerome Marchand924bd882011-06-10 15:28:48 +0200453
454 if (page_zero_filled(uncmem)) {
Cong Wangba82fe22011-11-25 23:14:25 +0800455 kunmap_atomic(user_mem);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900456 /* Free memory associated with this sector now. */
Minchan Kim92967472014-01-30 15:46:03 -0800457 write_lock(&zram->meta->tb_lock);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900458 zram_free_page(zram, index);
Minchan Kim92967472014-01-30 15:46:03 -0800459 zram_set_flag(meta, index, ZRAM_ZERO);
460 write_unlock(&zram->meta->tb_lock);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900461
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800462 atomic_inc(&zram->stats.pages_zero);
Jerome Marchand924bd882011-06-10 15:28:48 +0200463 ret = 0;
464 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200465 }
466
Jerome Marchand924bd882011-06-10 15:28:48 +0200467 ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900468 meta->compress_workmem);
Nitin Gupta397c6062013-01-02 08:53:41 -0800469 if (!is_partial_io(bvec)) {
470 kunmap_atomic(user_mem);
471 user_mem = NULL;
472 uncmem = NULL;
473 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200474
475 if (unlikely(ret != LZO_E_OK)) {
Jerome Marchand8c921b22011-06-10 15:28:47 +0200476 pr_err("Compression failed! err=%d\n", ret);
Jerome Marchand924bd882011-06-10 15:28:48 +0200477 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200478 }
479
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700480 if (unlikely(clen > max_zpage_size)) {
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800481 atomic_inc(&zram->stats.bad_compress);
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700482 clen = PAGE_SIZE;
Nitin Gupta397c6062013-01-02 08:53:41 -0800483 src = NULL;
484 if (is_partial_io(bvec))
485 src = uncmem;
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700486 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200487
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900488 handle = zs_malloc(meta->mem_pool, clen);
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600489 if (!handle) {
Marlies Ruck596b3dd2013-05-16 14:30:39 -0400490 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
491 index, clen);
Jerome Marchand924bd882011-06-10 15:28:48 +0200492 ret = -ENOMEM;
493 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200494 }
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900495 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200496
Jiang Liu42e99bd2013-06-07 00:07:30 +0800497 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
Nitin Gupta397c6062013-01-02 08:53:41 -0800498 src = kmap_atomic(page);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800499 copy_page(cmem, src);
Nitin Gupta397c6062013-01-02 08:53:41 -0800500 kunmap_atomic(src);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800501 } else {
502 memcpy(cmem, src, clen);
503 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200504
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900505 zs_unmap_object(meta->mem_pool, handle);
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600506
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900507 /*
508 * Free memory associated with this sector
509 * before overwriting unused sectors.
510 */
Minchan Kim92967472014-01-30 15:46:03 -0800511 write_lock(&zram->meta->tb_lock);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900512 zram_free_page(zram, index);
513
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900514 meta->table[index].handle = handle;
515 meta->table[index].size = clen;
Minchan Kim92967472014-01-30 15:46:03 -0800516 write_unlock(&zram->meta->tb_lock);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200517
518 /* Update stats */
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800519 atomic64_add(clen, &zram->stats.compr_size);
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800520 atomic_inc(&zram->stats.pages_stored);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200521 if (clen <= PAGE_SIZE / 2)
Minchan Kimdeb0bde2014-01-30 15:46:02 -0800522 atomic_inc(&zram->stats.good_compress);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200523
Jerome Marchand924bd882011-06-10 15:28:48 +0200524out:
Minchan Kime46e3312014-01-30 15:46:06 -0800525 if (locked)
526 mutex_unlock(&meta->buffer_lock);
Nitin Gupta397c6062013-01-02 08:53:41 -0800527 if (is_partial_io(bvec))
528 kfree(uncmem);
529
Jerome Marchand924bd882011-06-10 15:28:48 +0200530 if (ret)
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800531 atomic64_inc(&zram->stats.failed_writes);
Jerome Marchand924bd882011-06-10 15:28:48 +0200532 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200533}
534
535static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700536 int offset, struct bio *bio)
Jerome Marchand8c921b22011-06-10 15:28:47 +0200537{
Jerome Marchandc5bde232011-06-10 15:28:49 +0200538 int ret;
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700539 int rw = bio_data_dir(bio);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200540
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700541 if (rw == READ) {
542 atomic64_inc(&zram->stats.num_reads);
Jerome Marchandc5bde232011-06-10 15:28:49 +0200543 ret = zram_bvec_read(zram, bvec, index, offset, bio);
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700544 } else {
545 atomic64_inc(&zram->stats.num_writes);
Jerome Marchandc5bde232011-06-10 15:28:49 +0200546 ret = zram_bvec_write(zram, bvec, index, offset);
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700547 }
Jerome Marchandc5bde232011-06-10 15:28:49 +0200548
549 return ret;
Jerome Marchand924bd882011-06-10 15:28:48 +0200550}
551
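/*
 * Tear down an initialized device: free every zsmalloc object still
 * referenced by the table, release the metadata, clear the statistics
 * and, when @reset_capacity is true, shrink the gendisk back to zero.
 */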
Minchan Kim2b86ab92013-08-12 15:13:55 +0900552static void zram_reset_device(struct zram *zram, bool reset_capacity)
Jerome Marchand924bd882011-06-10 15:28:48 +0200553{
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300554 size_t index;
555 struct zram_meta *meta;
556
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300557 down_write(&zram->init_lock);
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700558 if (!init_done(zram)) {
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300559 up_write(&zram->init_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300560 return;
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300561 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300562
563 meta = zram->meta;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300564 /* Free all pages that are still in this zram device */
565 for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
566 unsigned long handle = meta->table[index].handle;
567 if (!handle)
568 continue;
569
570 zs_free(meta->mem_pool, handle);
571 }
572
573 zram_meta_free(zram->meta);
574 zram->meta = NULL;
575 /* Reset stats */
576 memset(&zram->stats, 0, sizeof(zram->stats));
577
578 zram->disksize = 0;
Minchan Kim2b86ab92013-08-12 15:13:55 +0900579 if (reset_capacity)
580 set_capacity(zram->disk, 0);
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300581 up_write(&zram->init_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300582}
583
584static void zram_init_device(struct zram *zram, struct zram_meta *meta)
585{
586 if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
587 pr_info(
588 "There is little point creating a zram of greater than "
589 "twice the size of memory since we expect a 2:1 compression "
590 "ratio. Note that zram uses about 0.1%% of the size of "
591 "the disk when not in use so a huge zram is "
592 "wasteful.\n"
593 "\tMemory Size: %lu kB\n"
594 "\tSize you selected: %llu kB\n"
595 "Continuing anyway ...\n",
596 (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
597 );
598 }
599
 600 /* zram devices sort of resemble non-rotational disks */
601 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
602
603 zram->meta = meta;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300604 pr_debug("Initialization done!\n");
605}
606
607static ssize_t disksize_store(struct device *dev,
608 struct device_attribute *attr, const char *buf, size_t len)
609{
610 u64 disksize;
611 struct zram_meta *meta;
612 struct zram *zram = dev_to_zram(dev);
613
614 disksize = memparse(buf, NULL);
615 if (!disksize)
616 return -EINVAL;
617
618 disksize = PAGE_ALIGN(disksize);
619 meta = zram_meta_alloc(disksize);
Minchan Kimdb5d7112014-03-03 15:38:34 -0800620 if (!meta)
621 return -ENOMEM;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300622 down_write(&zram->init_lock);
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700623 if (init_done(zram)) {
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300624 up_write(&zram->init_lock);
625 zram_meta_free(meta);
626 pr_info("Cannot change disksize for initialized device\n");
627 return -EBUSY;
628 }
629
630 zram->disksize = disksize;
631 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
632 zram_init_device(zram, meta);
633 up_write(&zram->init_lock);
634
635 return len;
636}
637
638static ssize_t reset_store(struct device *dev,
639 struct device_attribute *attr, const char *buf, size_t len)
640{
641 int ret;
642 unsigned short do_reset;
643 struct zram *zram;
644 struct block_device *bdev;
645
646 zram = dev_to_zram(dev);
647 bdev = bdget_disk(zram->disk, 0);
648
Rashika Kheria46a51c82013-10-30 18:36:32 +0530649 if (!bdev)
650 return -ENOMEM;
651
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300652 /* Do not reset an active device! */
Rashika Kheria1b672222013-11-10 22:13:53 +0530653 if (bdev->bd_holders) {
654 ret = -EBUSY;
655 goto out;
656 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300657
658 ret = kstrtou16(buf, 10, &do_reset);
659 if (ret)
Rashika Kheria1b672222013-11-10 22:13:53 +0530660 goto out;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300661
Rashika Kheria1b672222013-11-10 22:13:53 +0530662 if (!do_reset) {
663 ret = -EINVAL;
664 goto out;
665 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300666
667 /* Make sure all pending I/O is finished */
Rashika Kheria46a51c82013-10-30 18:36:32 +0530668 fsync_bdev(bdev);
Rashika Kheria1b672222013-11-10 22:13:53 +0530669 bdput(bdev);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300670
Minchan Kim2b86ab92013-08-12 15:13:55 +0900671 zram_reset_device(zram, true);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300672 return len;
Rashika Kheria1b672222013-11-10 22:13:53 +0530673
674out:
675 bdput(bdev);
676 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200677}
678
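/*
 * Walk the bio segment by segment.  A segment that straddles a page
 * boundary is split in two so that zram_bvec_rw() only ever sees I/O
 * confined to a single zram page.
 */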
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700679static void __zram_make_request(struct zram *zram, struct bio *bio)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530680{
Kent Overstreet79886132013-11-23 17:19:00 -0800681 int offset;
Nitin Guptaa1dd52a2010-06-01 13:31:23 +0530682 u32 index;
Kent Overstreet79886132013-11-23 17:19:00 -0800683 struct bio_vec bvec;
684 struct bvec_iter iter;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530685
Kent Overstreet4f024f32013-10-11 15:44:27 -0700686 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
687 offset = (bio->bi_iter.bi_sector &
688 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530689
Kent Overstreet79886132013-11-23 17:19:00 -0800690 bio_for_each_segment(bvec, bio, iter) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200691 int max_transfer_size = PAGE_SIZE - offset;
692
Kent Overstreet79886132013-11-23 17:19:00 -0800693 if (bvec.bv_len > max_transfer_size) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200694 /*
 695 * zram_bvec_rw() can only operate on a single
696 * zram page. Split the bio vector.
697 */
698 struct bio_vec bv;
699
Kent Overstreet79886132013-11-23 17:19:00 -0800700 bv.bv_page = bvec.bv_page;
Jerome Marchand924bd882011-06-10 15:28:48 +0200701 bv.bv_len = max_transfer_size;
Kent Overstreet79886132013-11-23 17:19:00 -0800702 bv.bv_offset = bvec.bv_offset;
Jerome Marchand924bd882011-06-10 15:28:48 +0200703
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700704 if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200705 goto out;
706
Kent Overstreet79886132013-11-23 17:19:00 -0800707 bv.bv_len = bvec.bv_len - max_transfer_size;
Jerome Marchand924bd882011-06-10 15:28:48 +0200708 bv.bv_offset += max_transfer_size;
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700709 if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200710 goto out;
711 } else
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700712 if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200713 goto out;
714
Kent Overstreet79886132013-11-23 17:19:00 -0800715 update_position(&index, &offset, &bvec);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +0530716 }
Nitin Gupta306b0c92009-09-22 10:26:53 +0530717
718 set_bit(BIO_UPTODATE, &bio->bi_flags);
719 bio_endio(bio, 0);
Nitin Gupta7d7854b2011-01-22 07:36:15 -0500720 return;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530721
722out:
Nitin Gupta306b0c92009-09-22 10:26:53 +0530723 bio_io_error(bio);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530724}
725
Nitin Gupta306b0c92009-09-22 10:26:53 +0530726/*
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530727 * Handler function for all zram I/O requests.
Nitin Gupta306b0c92009-09-22 10:26:53 +0530728 */
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +0200729static void zram_make_request(struct request_queue *queue, struct bio *bio)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530730{
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530731 struct zram *zram = queue->queuedata;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530732
Jerome Marchand0900bea2011-09-06 15:02:11 +0200733 down_read(&zram->init_lock);
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700734 if (unlikely(!init_done(zram)))
Minchan Kim3de738c2013-01-30 11:41:41 +0900735 goto error;
Jerome Marchand0900bea2011-09-06 15:02:11 +0200736
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530737 if (!valid_io_request(zram, bio)) {
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800738 atomic64_inc(&zram->stats.invalid_io);
Minchan Kim3de738c2013-01-30 11:41:41 +0900739 goto error;
Jerome Marchand6642a672011-02-17 17:11:49 +0100740 }
741
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700742 __zram_make_request(zram, bio);
Jerome Marchand0900bea2011-09-06 15:02:11 +0200743 up_read(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530744
Linus Torvaldsb4fdcb02011-11-04 17:06:58 -0700745 return;
Jerome Marchand0900bea2011-09-06 15:02:11 +0200746
Jerome Marchand0900bea2011-09-06 15:02:11 +0200747error:
Minchan Kim3de738c2013-01-30 11:41:41 +0900748 up_read(&zram->init_lock);
Jerome Marchand0900bea2011-09-06 15:02:11 +0200749 bio_io_error(bio);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530750}
751
Nitin Gupta2ccbec02011-09-09 19:01:00 -0400752static void zram_slot_free_notify(struct block_device *bdev,
753 unsigned long index)
Nitin Gupta107c1612010-05-17 11:02:44 +0530754{
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530755 struct zram *zram;
Minchan Kimf614a9f2014-01-30 15:46:04 -0800756 struct zram_meta *meta;
Nitin Gupta107c1612010-05-17 11:02:44 +0530757
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530758 zram = bdev->bd_disk->private_data;
Minchan Kimf614a9f2014-01-30 15:46:04 -0800759 meta = zram->meta;
760
761 write_lock(&meta->tb_lock);
762 zram_free_page(zram, index);
763 write_unlock(&meta->tb_lock);
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800764 atomic64_inc(&zram->stats.notify_free);
Nitin Gupta107c1612010-05-17 11:02:44 +0530765}
766
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530767static const struct block_device_operations zram_devops = {
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530768 .swap_slot_free_notify = zram_slot_free_notify,
Nitin Gupta107c1612010-05-17 11:02:44 +0530769 .owner = THIS_MODULE
Nitin Gupta306b0c92009-09-22 10:26:53 +0530770};
771
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300772static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
773 disksize_show, disksize_store);
774static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
775static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
776static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
777static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
778static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
779static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
780static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
781static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
782static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
783static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
784
785static struct attribute *zram_disk_attrs[] = {
786 &dev_attr_disksize.attr,
787 &dev_attr_initstate.attr,
788 &dev_attr_reset.attr,
789 &dev_attr_num_reads.attr,
790 &dev_attr_num_writes.attr,
791 &dev_attr_invalid_io.attr,
792 &dev_attr_notify_free.attr,
793 &dev_attr_zero_pages.attr,
794 &dev_attr_orig_data_size.attr,
795 &dev_attr_compr_data_size.attr,
796 &dev_attr_mem_used_total.attr,
797 NULL,
798};
799
800static struct attribute_group zram_disk_attr_group = {
801 .attrs = zram_disk_attrs,
802};
803
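/*
 * Set up one zram block device: a bio-based request queue, a gendisk
 * with zero capacity (the real size is set later through the disksize
 * sysfs attribute), PAGE_SIZE-aligned queue limits and the sysfs
 * attribute group.
 */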
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530804static int create_device(struct zram *zram, int device_id)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530805{
Jiang Liu39a9b8a2013-06-07 00:07:24 +0800806 int ret = -ENOMEM;
Nitin Guptade1a21a2010-01-28 21:13:40 +0530807
Jerome Marchand0900bea2011-09-06 15:02:11 +0200808 init_rwsem(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530809
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530810 zram->queue = blk_alloc_queue(GFP_KERNEL);
811 if (!zram->queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +0530812 pr_err("Error allocating disk queue for device %d\n",
813 device_id);
Nitin Guptade1a21a2010-01-28 21:13:40 +0530814 goto out;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530815 }
816
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530817 blk_queue_make_request(zram->queue, zram_make_request);
818 zram->queue->queuedata = zram;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530819
820 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530821 zram->disk = alloc_disk(1);
822 if (!zram->disk) {
Sam Hansen94b84352012-06-07 16:03:47 -0700823 pr_warn("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +0530824 device_id);
Jiang Liu39a9b8a2013-06-07 00:07:24 +0800825 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530826 }
827
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530828 zram->disk->major = zram_major;
829 zram->disk->first_minor = device_id;
830 zram->disk->fops = &zram_devops;
831 zram->disk->queue = zram->queue;
832 zram->disk->private_data = zram;
833 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530834
Nitin Gupta33863c22010-08-09 22:56:47 +0530835 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530836 set_capacity(zram->disk, 0);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +0530837
Nitin Guptaa1dd52a2010-06-01 13:31:23 +0530838 /*
839 * To ensure that we always get PAGE_SIZE aligned
 840 * and n*PAGE_SIZE sized I/O requests.
841 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530842 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -0600843 blk_queue_logical_block_size(zram->disk->queue,
844 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530845 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
846 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +0530847
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530848 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530849
Nitin Gupta33863c22010-08-09 22:56:47 +0530850 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
851 &zram_disk_attr_group);
852 if (ret < 0) {
Sam Hansen94b84352012-06-07 16:03:47 -0700853 pr_warn("Error creating sysfs group");
Jiang Liu39a9b8a2013-06-07 00:07:24 +0800854 goto out_free_disk;
Nitin Gupta33863c22010-08-09 22:56:47 +0530855 }
Nitin Gupta33863c22010-08-09 22:56:47 +0530856
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700857 zram->meta = NULL;
Jiang Liu39a9b8a2013-06-07 00:07:24 +0800858 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +0530859
Jiang Liu39a9b8a2013-06-07 00:07:24 +0800860out_free_disk:
861 del_gendisk(zram->disk);
862 put_disk(zram->disk);
863out_free_queue:
864 blk_cleanup_queue(zram->queue);
Nitin Guptade1a21a2010-01-28 21:13:40 +0530865out:
866 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530867}
868
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530869static void destroy_device(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530870{
Nitin Gupta33863c22010-08-09 22:56:47 +0530871 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
872 &zram_disk_attr_group);
Nitin Gupta33863c22010-08-09 22:56:47 +0530873
Rashika Kheria59d3fe52013-10-30 18:43:32 +0530874 del_gendisk(zram->disk);
875 put_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530876
Rashika Kheria59d3fe52013-10-30 18:43:32 +0530877 blk_cleanup_queue(zram->queue);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530878}
879
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530880static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530881{
Nitin Guptade1a21a2010-01-28 21:13:40 +0530882 int ret, dev_id;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530883
Nitin Gupta5fa5a902012-02-12 23:04:45 -0500884 if (num_devices > max_num_devices) {
Sam Hansen94b84352012-06-07 16:03:47 -0700885 pr_warn("Invalid value for num_devices: %u\n",
Nitin Gupta5fa5a902012-02-12 23:04:45 -0500886 num_devices);
Nitin Guptade1a21a2010-01-28 21:13:40 +0530887 ret = -EINVAL;
888 goto out;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530889 }
890
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530891 zram_major = register_blkdev(0, "zram");
892 if (zram_major <= 0) {
Sam Hansen94b84352012-06-07 16:03:47 -0700893 pr_warn("Unable to get major number\n");
Nitin Guptade1a21a2010-01-28 21:13:40 +0530894 ret = -EBUSY;
895 goto out;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530896 }
897
Nitin Gupta306b0c92009-09-22 10:26:53 +0530898 /* Allocate the device array and initialize each one */
Nitin Gupta5fa5a902012-02-12 23:04:45 -0500899 zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
Noah Watkins43801f62011-07-20 17:05:57 -0600900 if (!zram_devices) {
Nitin Guptade1a21a2010-01-28 21:13:40 +0530901 ret = -ENOMEM;
902 goto unregister;
903 }
Nitin Gupta306b0c92009-09-22 10:26:53 +0530904
Nitin Gupta5fa5a902012-02-12 23:04:45 -0500905 for (dev_id = 0; dev_id < num_devices; dev_id++) {
Noah Watkins43801f62011-07-20 17:05:57 -0600906 ret = create_device(&zram_devices[dev_id], dev_id);
Nitin Guptade1a21a2010-01-28 21:13:40 +0530907 if (ret)
Minchan Kim3bf040c2010-01-11 16:15:53 +0900908 goto free_devices;
Nitin Guptade1a21a2010-01-28 21:13:40 +0530909 }
910
Davidlohr Buesoca3d70b2013-01-01 21:24:13 -0800911 pr_info("Created %u device(s) ...\n", num_devices);
912
Nitin Gupta306b0c92009-09-22 10:26:53 +0530913 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +0530914
Minchan Kim3bf040c2010-01-11 16:15:53 +0900915free_devices:
Nitin Guptade1a21a2010-01-28 21:13:40 +0530916 while (dev_id)
Noah Watkins43801f62011-07-20 17:05:57 -0600917 destroy_device(&zram_devices[--dev_id]);
918 kfree(zram_devices);
Nitin Guptade1a21a2010-01-28 21:13:40 +0530919unregister:
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530920 unregister_blkdev(zram_major, "zram");
Nitin Guptade1a21a2010-01-28 21:13:40 +0530921out:
Nitin Gupta306b0c92009-09-22 10:26:53 +0530922 return ret;
923}
924
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530925static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530926{
927 int i;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530928 struct zram *zram;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530929
Nitin Gupta5fa5a902012-02-12 23:04:45 -0500930 for (i = 0; i < num_devices; i++) {
Noah Watkins43801f62011-07-20 17:05:57 -0600931 zram = &zram_devices[i];
Nitin Gupta306b0c92009-09-22 10:26:53 +0530932
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530933 destroy_device(zram);
Minchan Kim2b86ab92013-08-12 15:13:55 +0900934 /*
935 * Shouldn't access zram->disk after destroy_device
936 * because destroy_device already released zram->disk.
937 */
938 zram_reset_device(zram, false);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530939 }
940
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530941 unregister_blkdev(zram_major, "zram");
Nitin Gupta306b0c92009-09-22 10:26:53 +0530942
Noah Watkins43801f62011-07-20 17:05:57 -0600943 kfree(zram_devices);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530944 pr_debug("Cleanup done!\n");
945}
946
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530947module_init(zram_init);
948module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530949
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300950module_param(num_devices, uint, 0);
951MODULE_PARM_DESC(num_devices, "Number of zram devices");
952
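/*
 * Typical usage from user space (illustrative only; device name and
 * size are just examples):
 *
 *	modprobe zram num_devices=1
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon -p 10 /dev/zram0
 *
 * Once the device is no longer in use (swapoff/umount), writing a
 * non-zero value to /sys/block/zram0/reset frees all its memory.
 */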
Nitin Gupta306b0c92009-09-22 10:26:53 +0530953MODULE_LICENSE("Dual BSD/GPL");
954MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530955MODULE_DESCRIPTION("Compressed RAM Block Device");