/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->stats.pages_zero);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

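/*
 * Allocate the per-device metadata: LZO scratch memory, a two-page
 * compression buffer, the page table and the zsmalloc pool that backs
 * the compressed data.
 */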
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

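/*
 * Advance (index, offset) past the bytes consumed by @bvec: index is
 * the zram page number, offset the byte offset within that page.
 */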
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

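/* Scan the page word by word; returns 1 iff every word is zero. */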
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

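/*
 * Free the compressed object (if any) stored at @index and update the
 * zero-page/compression statistics to match.
 */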
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

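/*
 * Decompress the object at @index into @mem (one full page). Zero
 * pages are synthesized with clear_page(); incompressible pages are
 * stored raw and only need a copy.
 */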
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

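/*
 * Read one bio_vec worth of data. A partial (sub-page) read first
 * decompresses the whole page into a bounce buffer and copies out the
 * requested range; a full-page read decompresses straight into the
 * caller's page.
 */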
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

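/*
 * Write one bio_vec. Partial writes are handled read-modify-write:
 * the old page is decompressed, patched, then recompressed. Pages
 * that are entirely zeroes only set ZRAM_ZERO; pages that compress
 * badly (clen > max_zpage_size) are stored uncompressed.
 */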
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		zram_free_page(zram, index);

		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	/*
	 * zram_slot_free_notify could have missed a free, so
	 * double check.
	 */
	if (unlikely(meta->table[index].handle ||
			zram_test_flag(meta, index, ZRAM_ZERO)))
		zram_free_page(zram, index);

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

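/*
 * Drain the list of slot-free requests queued by
 * zram_slot_free_notify() and release the corresponding pages.
 */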
static void handle_pending_slot_free(struct zram *zram)
{
	struct zram_slot_free *free_rq;

	spin_lock(&zram->slot_free_lock);
	while (zram->slot_free_rq) {
		free_rq = zram->slot_free_rq;
		zram->slot_free_rq = free_rq->next;
		zram_free_page(zram, free_rq->index);
		kfree(free_rq);
	}
	spin_unlock(&zram->slot_free_lock);
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

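/*
 * Tear down the device: free every stored object plus the metadata,
 * and clear the stats. @reset_capacity is false only on module
 * unload, when the gendisk has already been released.
 */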
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	flush_work(&zram->free_work);

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

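/*
 * Set the (page-aligned) disk size and bring the device up, e.g.
 * from userspace:
 *
 *	echo 512M > /sys/block/zram0/disksize
 *
 * The size string is parsed by memparse(), so K/M/G suffixes work.
 * Fails with -EBUSY once the device has been initialized.
 */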
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

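/*
 * Walk the bio segment by segment. A segment that straddles a
 * PAGE_SIZE boundary is split in two so that zram_bvec_rw() only ever
 * operates within a single zram page.
 */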
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

static void zram_slot_free(struct work_struct *work)
{
	struct zram *zram;

	zram = container_of(work, struct zram, free_work);
	down_write(&zram->lock);
	handle_pending_slot_free(zram);
	up_write(&zram->lock);
}

static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
	spin_lock(&zram->slot_free_lock);
	free_rq->next = zram->slot_free_rq;
	zram->slot_free_rq = free_rq;
	spin_unlock(&zram->slot_free_lock);
}

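/*
 * Called by the swap layer when a swap slot is freed. The free is
 * queued and deferred to a workqueue; a failed GFP_ATOMIC allocation
 * here is tolerable because a stale slot is also reclaimed on the
 * next write to it (see the double check in zram_bvec_write()).
 */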
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_slot_free *free_rq;

	zram = bdev->bd_disk->private_data;
	atomic64_inc(&zram->stats.notify_free);

	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
	if (!free_rq)
		return;

	free_rq->index = index;
	add_slot_free(zram, free_rq);
	schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	INIT_WORK(&zram->free_work, zram_slot_free);
	spin_lock_init(&zram->slot_free_lock);
	zram->slot_free_rq = NULL;

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
			num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");