/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

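/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs attribute whose show()
 * handler reports the 64-bit counter zram->stats.<name>.
 */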
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b) \
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations require holding the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

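/*
 * Allocate the per-device metadata: the table mapping each disk page to a
 * zsmalloc handle (plus flags/size) and the backing zsmalloc pool.
 * Returns NULL on allocation failure.
 */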
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

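/*
 * Advance (index, offset) past the bio vector that was just processed;
 * index counts whole PAGE_SIZE units, offset is the byte offset within
 * the current page.
 */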
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

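/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */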
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, to indicate that
 * this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

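/*
 * Decompress the object stored at @index into @mem (a full page).
 * Unallocated or ZRAM_ZERO slots are expanded to a cleared page.
 */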
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

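/*
 * Read the data for @bvec from slot @index. For partial I/O the page is
 * decompressed into a temporary buffer and only the requested part is
 * copied into the caller's page.
 */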
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

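/*
 * Record a new high-water mark of allocated pages, racelessly, using an
 * atomic cmpxchg loop.
 */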
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

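/*
 * Compress the data in @bvec and store it at slot @index. Zero-filled
 * pages are only flagged ZRAM_ZERO; pages that compress poorly
 * (clen > max_zpage_size) are stored uncompressed.
 */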
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

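/*
 * Dispatch a single bio vector to the read or write path and account
 * for failed requests.
 */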
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	int ret;

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

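/*
 * Free every compressed object and the device metadata, returning the
 * device to its uninitialised state. When @reset_capacity is true the
 * disk capacity is also set back to zero.
 */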
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

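/*
 * Set the virtual disk size: allocate the metadata and the compression
 * backend first, then publish them under init_lock so a device can only
 * be sized once until it is reset.
 */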
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

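/*
 * Reset the device from sysfs. Refused while the block device still has
 * holders; pending I/O is flushed before the reset.
 */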
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

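/*
 * Walk the bio segment by segment, splitting any vector that crosses a
 * zram page boundary so zram_bvec_rw() only ever sees one page at a time.
 */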
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200871 /*
872 * zram_bvec_rw() can only make operation on a single
873 * zram page. Split the bio vector.
874 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

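/*
 * Called by the swap code when a swap slot backed by this device is
 * freed, so the compressed object can be released immediately.
 */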
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

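/*
 * rw_page block layer hook: read or write a single page synchronously,
 * without the caller having to build a bio.
 */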
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		return -EINVAL;
	}

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram))) {
		err = -EIO;
		goto out_unlock;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
out_unlock:
	up_read(&zram->init_lock);
	/*
	 * If I/O fails, just return error (i.e. non-zero) without
	 * calling page_endio.
	 * The upper layers of rw_page (e.g. swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error (e.g. SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

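/* Tear down the sysfs group, gendisk and request queue set up by create_device(). */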
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");