/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

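/*
 * Free the compressed object (if any) stored at @index and update the
 * statistics. Zero-filled pages have no allocation, so only the
 * ZRAM_ZERO flag is cleared for them.
 */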
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

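/*
 * Decompress the page stored at @index into @mem. Pages that were never
 * written or are zero-filled are expanded to a page of zeroes; pages
 * stored uncompressed (size == PAGE_SIZE) are copied as-is.
 */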
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

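/*
 * Read a single bio_vec. Partial (sub-page) reads decompress into a
 * temporary buffer and copy only the requested range into the caller's
 * page.
 */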
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

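/*
 * Write a single bio_vec. For partial writes the existing page is
 * decompressed first and the new data merged in before recompression.
 * Zero-filled pages are recorded with a flag instead of an allocation,
 * and pages that compress poorly are stored uncompressed.
 */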
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (meta->table[index].handle ||
	    zram_test_flag(meta, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

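/* Dispatch a bio_vec to the read or write path under zram->lock. */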
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

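/* Advance the page index/offset cursor past the bytes covered by @bvec. */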
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

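/*
 * Walk the bio segment by segment, splitting any bio_vec that crosses a
 * zram page boundary so that each operation touches a single page.
 */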
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_sector;
	end = start + (bio->bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end >= bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

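/*
 * Free every compressed page and the per-device metadata, then clear the
 * statistics and the exported capacity. zram_reset_device() wraps this
 * in zram->init_lock taken for writing.
 */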
static void __zram_reset_device(struct zram *zram)
{
	size_t index;
	struct zram_meta *meta;

	if (!zram->init_done)
		return;

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

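/*
 * Allocate the per-device metadata: LZO work memory, a two-page
 * compression buffer, the page table sized for @disksize and the
 * zsmalloc pool. Returns NULL on failure.
 */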
struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

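/*
 * Called by the swap layer when a swap slot becomes free, so the backing
 * compressed page can be released immediately.
 */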
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	down_write(&zram->lock);
	zram_free_page(zram, index);
	up_write(&zram->lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity is set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		get_disk(zram->disk);
		destroy_device(zram);
		zram_reset_device(zram);
		put_disk(zram->disk);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");