/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

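/*
 * 64-bit stat counters are updated under stat64_lock: a plain 64-bit
 * read-modify-write is not atomic on 32-bit architectures, so the
 * spinlock keeps concurrent updates from corrupting the counters.
 */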
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

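/* Returns 1 if the page at @ptr contains only zero bytes, 0 otherwise. */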
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

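/*
 * Release the compressed object (if any) backing the page at @index and
 * update the per-device statistics. Zero-filled pages have no allocation;
 * only their ZRAM_ZERO flag is cleared.
 */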
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram->table[index].handle;
	u16 size = zram->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(zram->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram->stats.pages_stored--;

	zram->table[index].handle = 0;
	zram->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

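/*
 * Decompress the page at @index into @mem (PAGE_SIZE bytes). Unallocated
 * and zero-filled pages are returned as all zeroes; pages stored
 * uncompressed (size == PAGE_SIZE) are copied as-is.
 */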
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	unsigned long handle = zram->table[index].handle;

	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (zram->table[index].size == PAGE_SIZE)
		memcpy(mem, cmem, PAGE_SIZE);
	else
		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
						mem, &clen);
	zs_unmap_object(zram->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

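/*
 * Read the region described by @bvec. A partial read decompresses the
 * whole page into a temporary buffer and copies out only the requested
 * bytes; a full-page read decompresses directly into the bio page.
 */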
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;

	page = bvec->bv_page;

	if (unlikely(!zram->table[index].handle) ||
			zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		goto out_cleanup;
	}

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

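/*
 * Write the region described by @bvec. Partial writes first read back the
 * existing page (read-modify-write). Zero-filled pages are only flagged;
 * all others are LZO-compressed and stored in the zsmalloc pool, falling
 * back to an uncompressed copy when the result exceeds max_zpage_size.
 */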
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (!is_partial_io(bvec))
			kunmap_atomic(user_mem);
		/* For partial I/O, uncmem is freed at the out: label */
		zram->stats.pages_zero++;
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(zram->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		src = kmap_atomic(page);
	memcpy(cmem, src, clen);
	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		kunmap_atomic(src);

	zs_unmap_object(zram->mem_pool, handle);

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

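/*
 * Dispatch a single-page read or write. Reads take zram->lock shared so
 * they can proceed concurrently; writes take it exclusively.
 */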
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

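/*
 * Walk the bio segment by segment, issuing at most one zram page worth of
 * I/O at a time; a segment that crosses a page boundary is split into two
 * calls to zram_bvec_rw().
 */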
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

void __zram_reset_device(struct zram *zram)
{
	size_t index;

	if (!zram->init_done)
		return;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = zram->table[index].handle;
		if (!handle)
			continue;

		zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

/* zram->init_lock should be held */
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

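/*
 * Allocate the request queue and gendisk for one zram device, register it
 * with the block layer and expose its sysfs attributes. The disk capacity
 * stays zero until a size is written to the disksize attribute.
 */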
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
	return num_devices;
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");