Nitin Gupta306b0c92009-09-22 10:26:53 +05301/*
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302 * Compressed RAM block device
Nitin Gupta306b0c92009-09-22 10:26:53 +05303 *
Nitin Gupta1130ebb2010-01-28 21:21:35 +05304 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
Minchan Kim7bfb3de2014-01-30 15:45:55 -08005 * 2012, 2013 Minchan Kim
Nitin Gupta306b0c92009-09-22 10:26:53 +05306 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
Nitin Gupta306b0c92009-09-22 10:26:53 +053013 */
14
Nitin Guptaf1e3cff2010-06-01 13:31:25 +053015#define KMSG_COMPONENT "zram"
Nitin Gupta306b0c92009-09-22 10:26:53 +053016#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
Robert Jenningsb1f5b812011-01-28 08:59:26 -060018#ifdef CONFIG_ZRAM_DEBUG
19#define DEBUG
20#endif
21
Nitin Gupta306b0c92009-09-22 10:26:53 +053022#include <linux/module.h>
23#include <linux/kernel.h>
Randy Dunlap8946a082010-06-23 20:27:09 -070024#include <linux/bio.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053025#include <linux/bitops.h>
26#include <linux/blkdev.h>
27#include <linux/buffer_head.h>
28#include <linux/device.h>
29#include <linux/genhd.h>
30#include <linux/highmem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053032#include <linux/string.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053033#include <linux/vmalloc.h>
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -070034#include <linux/err.h>
Nitin Gupta306b0c92009-09-22 10:26:53 +053035
Nitin Gupta16a4bfb2010-06-01 13:31:24 +053036#include "zram_drv.h"
Nitin Gupta306b0c92009-09-22 10:26:53 +053037
38/* Globals */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +053039static int zram_major;
Jiang Liu0f0e3ba2013-06-07 00:07:29 +080040static struct zram *zram_devices;
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -070041static const char *default_compressor = "lzo";
Nitin Gupta306b0c92009-09-22 10:26:53 +053042
Nitin Gupta306b0c92009-09-22 10:26:53 +053043/* Module params (documentation at end) */
Davidlohr Buesoca3d70b2013-01-01 21:24:13 -080044static unsigned int num_devices = 1;
Nitin Gupta33863c22010-08-09 22:56:47 +053045
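/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs attribute whose show()
 * method prints the atomic64 counter zram->stats.<name> as a decimal u64.
 */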
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070046#define ZRAM_ATTR_RO(name) \
Ganesh Mahendran083914e2014-12-12 16:57:13 -080047static ssize_t name##_show(struct device *d, \
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070048 struct device_attribute *attr, char *b) \
49{ \
50 struct zram *zram = dev_to_zram(d); \
Sergey Senozhatsky56b4e8c2014-04-07 15:38:22 -070051 return scnprintf(b, PAGE_SIZE, "%llu\n", \
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070052 (u64)atomic64_read(&zram->stats.name)); \
53} \
Ganesh Mahendran083914e2014-12-12 16:57:13 -080054static DEVICE_ATTR_RO(name);
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070055
Minchan Kim08eee692015-02-12 15:00:45 -080056static inline bool init_done(struct zram *zram)
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -070057{
Minchan Kim08eee692015-02-12 15:00:45 -080058 return zram->disksize;
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -070059}
60
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030061static inline struct zram *dev_to_zram(struct device *dev)
62{
63 return (struct zram *)dev_to_disk(dev)->private_data;
64}
65
66static ssize_t disksize_show(struct device *dev,
67 struct device_attribute *attr, char *buf)
68{
69 struct zram *zram = dev_to_zram(dev);
70
Sergey Senozhatsky56b4e8c2014-04-07 15:38:22 -070071 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030072}
73
74static ssize_t initstate_show(struct device *dev,
75 struct device_attribute *attr, char *buf)
76{
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070077 u32 val;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030078 struct zram *zram = dev_to_zram(dev);
79
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -070080 down_read(&zram->init_lock);
81 val = init_done(zram);
82 up_read(&zram->init_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030083
Sergey Senozhatsky56b4e8c2014-04-07 15:38:22 -070084 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030085}
86
87static ssize_t orig_data_size_show(struct device *dev,
88 struct device_attribute *attr, char *buf)
89{
90 struct zram *zram = dev_to_zram(dev);
91
Sergey Senozhatsky56b4e8c2014-04-07 15:38:22 -070092 return scnprintf(buf, PAGE_SIZE, "%llu\n",
Sergey Senozhatsky90a78062014-04-07 15:38:03 -070093 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030094}
95
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +030096static ssize_t mem_used_total_show(struct device *dev,
97 struct device_attribute *attr, char *buf)
98{
99 u64 val = 0;
100 struct zram *zram = dev_to_zram(dev);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300101
102 down_read(&zram->init_lock);
Weijie Yang5a99e952014-10-29 14:50:57 -0700103 if (init_done(zram)) {
104 struct zram_meta *meta = zram->meta;
Minchan Kim722cdc12014-10-09 15:29:50 -0700105 val = zs_get_total_pages(meta->mem_pool);
Weijie Yang5a99e952014-10-29 14:50:57 -0700106 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300107 up_read(&zram->init_lock);
108
Minchan Kim722cdc12014-10-09 15:29:50 -0700109 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300110}
111
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700112static ssize_t max_comp_streams_show(struct device *dev,
113 struct device_attribute *attr, char *buf)
114{
115 int val;
116 struct zram *zram = dev_to_zram(dev);
117
118 down_read(&zram->init_lock);
119 val = zram->max_comp_streams;
120 up_read(&zram->init_lock);
121
Sergey Senozhatsky56b4e8c2014-04-07 15:38:22 -0700122 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700123}
124
Minchan Kim9ada9da2014-10-09 15:29:53 -0700125static ssize_t mem_limit_show(struct device *dev,
126 struct device_attribute *attr, char *buf)
127{
128 u64 val;
129 struct zram *zram = dev_to_zram(dev);
130
131 down_read(&zram->init_lock);
132 val = zram->limit_pages;
133 up_read(&zram->init_lock);
134
135 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
136}
137
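/*
 * The limit is parsed with memparse(), so suffixes such as K/M/G are
 * accepted; it is rounded up to whole pages and stored in limit_pages.
 */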
138static ssize_t mem_limit_store(struct device *dev,
139 struct device_attribute *attr, const char *buf, size_t len)
140{
141 u64 limit;
142 char *tmp;
143 struct zram *zram = dev_to_zram(dev);
144
145 limit = memparse(buf, &tmp);
146 if (buf == tmp) /* no chars parsed, invalid input */
147 return -EINVAL;
148
149 down_write(&zram->init_lock);
150 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
151 up_write(&zram->init_lock);
152
153 return len;
154}
155
Minchan Kim461a8ee2014-10-09 15:29:55 -0700156static ssize_t mem_used_max_show(struct device *dev,
157 struct device_attribute *attr, char *buf)
158{
159 u64 val = 0;
160 struct zram *zram = dev_to_zram(dev);
161
162 down_read(&zram->init_lock);
163 if (init_done(zram))
164 val = atomic_long_read(&zram->stats.max_used_pages);
165 up_read(&zram->init_lock);
166
167 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
168}
169
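/*
 * Writing "0" resets the max_used_pages watermark to the current size of
 * the zsmalloc pool; any other value is rejected with -EINVAL.
 */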
170static ssize_t mem_used_max_store(struct device *dev,
171 struct device_attribute *attr, const char *buf, size_t len)
172{
173 int err;
174 unsigned long val;
175 struct zram *zram = dev_to_zram(dev);
Minchan Kim461a8ee2014-10-09 15:29:55 -0700176
177 err = kstrtoul(buf, 10, &val);
178 if (err || val != 0)
179 return -EINVAL;
180
181 down_read(&zram->init_lock);
Weijie Yang5a99e952014-10-29 14:50:57 -0700182 if (init_done(zram)) {
183 struct zram_meta *meta = zram->meta;
Minchan Kim461a8ee2014-10-09 15:29:55 -0700184 atomic_long_set(&zram->stats.max_used_pages,
185 zs_get_total_pages(meta->mem_pool));
Weijie Yang5a99e952014-10-29 14:50:57 -0700186 }
Minchan Kim461a8ee2014-10-09 15:29:55 -0700187 up_read(&zram->init_lock);
188
189 return len;
190}
191
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700192static ssize_t max_comp_streams_store(struct device *dev,
193 struct device_attribute *attr, const char *buf, size_t len)
194{
195 int num;
196 struct zram *zram = dev_to_zram(dev);
Minchan Kim60a726e2014-04-07 15:38:21 -0700197 int ret;
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700198
Minchan Kim60a726e2014-04-07 15:38:21 -0700199 ret = kstrtoint(buf, 0, &num);
200 if (ret < 0)
201 return ret;
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700202 if (num < 1)
203 return -EINVAL;
Minchan Kim60a726e2014-04-07 15:38:21 -0700204
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700205 down_write(&zram->init_lock);
206 if (init_done(zram)) {
Minchan Kim60a726e2014-04-07 15:38:21 -0700207 if (!zcomp_set_max_streams(zram->comp, num)) {
Sergey Senozhatskyfe8eb122014-04-07 15:38:15 -0700208 pr_info("Cannot change max compression streams\n");
Minchan Kim60a726e2014-04-07 15:38:21 -0700209 ret = -EINVAL;
210 goto out;
211 }
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700212 }
Minchan Kim60a726e2014-04-07 15:38:21 -0700213
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700214 zram->max_comp_streams = num;
Minchan Kim60a726e2014-04-07 15:38:21 -0700215 ret = len;
216out:
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700217 up_write(&zram->init_lock);
Minchan Kim60a726e2014-04-07 15:38:21 -0700218 return ret;
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -0700219}
220
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -0700221static ssize_t comp_algorithm_show(struct device *dev,
222 struct device_attribute *attr, char *buf)
223{
224 size_t sz;
225 struct zram *zram = dev_to_zram(dev);
226
227 down_read(&zram->init_lock);
228 sz = zcomp_available_show(zram->compressor, buf);
229 up_read(&zram->init_lock);
230
231 return sz;
232}
233
234static ssize_t comp_algorithm_store(struct device *dev,
235 struct device_attribute *attr, const char *buf, size_t len)
236{
237 struct zram *zram = dev_to_zram(dev);
238 down_write(&zram->init_lock);
239 if (init_done(zram)) {
240 up_write(&zram->init_lock);
241 pr_info("Can't change algorithm for initialized device\n");
242 return -EBUSY;
243 }
244 strlcpy(zram->compressor, buf, sizeof(zram->compressor));
245 up_write(&zram->init_lock);
246 return len;
247}
248
Minchan Kim92967472014-01-30 15:46:03 -0800249/* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) held */
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900250static int zram_test_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530251 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530252{
Weijie Yangd2d5e762014-08-06 16:08:31 -0700253 return meta->table[index].value & BIT(flag);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530254}
255
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900256static void zram_set_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530257 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530258{
Weijie Yangd2d5e762014-08-06 16:08:31 -0700259 meta->table[index].value |= BIT(flag);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530260}
261
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900262static void zram_clear_flag(struct zram_meta *meta, u32 index,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530263 enum zram_pageflags flag)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530264{
Weijie Yangd2d5e762014-08-06 16:08:31 -0700265 meta->table[index].value &= ~BIT(flag);
266}
267
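/*
 * meta->table[index].value packs two things: the low ZRAM_FLAG_SHIFT bits
 * hold the compressed object size, and the bits at or above ZRAM_FLAG_SHIFT
 * hold the zram_pageflags.
 */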
268static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
269{
270 return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
271}
272
273static void zram_set_obj_size(struct zram_meta *meta,
274 u32 index, size_t size)
275{
276 unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
277
278 meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530279}
280
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300281static inline int is_partial_io(struct bio_vec *bvec)
282{
283 return bvec->bv_len != PAGE_SIZE;
284}
285
286/*
287 * Check if request is within bounds and aligned on zram logical blocks.
288 */
karam.lee54850e72014-12-12 16:56:50 -0800289static inline int valid_io_request(struct zram *zram,
290 sector_t start, unsigned int size)
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300291{
karam.lee54850e72014-12-12 16:56:50 -0800292 u64 end, bound;
Kumar Gaurava539c722013-08-08 23:53:24 +0530293
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300294 /* unaligned request */
karam.lee54850e72014-12-12 16:56:50 -0800295 if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300296 return 0;
karam.lee54850e72014-12-12 16:56:50 -0800297 if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300298 return 0;
299
karam.lee54850e72014-12-12 16:56:50 -0800300 end = start + (size >> SECTOR_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300301 bound = zram->disksize >> SECTOR_SHIFT;
 302 /* out of range */
Sergey Senozhatsky75c7caf2013-06-22 17:21:00 +0300303 if (unlikely(start >= bound || end > bound || start > end))
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300304 return 0;
305
306 /* I/O request is valid */
307 return 1;
308}
309
Ganesh Mahendran1fec1172015-02-12 15:00:33 -0800310static void zram_meta_free(struct zram_meta *meta, u64 disksize)
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300311{
Ganesh Mahendran1fec1172015-02-12 15:00:33 -0800312 size_t num_pages = disksize >> PAGE_SHIFT;
313 size_t index;
314
315 /* Free all pages that are still in this zram device */
316 for (index = 0; index < num_pages; index++) {
317 unsigned long handle = meta->table[index].handle;
318
319 if (!handle)
320 continue;
321
322 zs_free(meta->mem_pool, handle);
323 }
324
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300325 zs_destroy_pool(meta->mem_pool);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300326 vfree(meta->table);
327 kfree(meta);
328}
329
Ganesh Mahendran3eba0c62015-02-12 15:00:51 -0800330static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300331{
332 size_t num_pages;
Ganesh Mahendran3eba0c62015-02-12 15:00:51 -0800333 char pool_name[8];
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300334 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800335
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300336 if (!meta)
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800337 return NULL;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300338
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300339 num_pages = disksize >> PAGE_SHIFT;
340 meta->table = vzalloc(num_pages * sizeof(*meta->table));
341 if (!meta->table) {
342 pr_err("Error allocating zram address table\n");
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800343 goto out_error;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300344 }
345
Ganesh Mahendran3eba0c62015-02-12 15:00:51 -0800346 snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
347 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300348 if (!meta->mem_pool) {
349 pr_err("Error creating memory pool\n");
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800350 goto out_error;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300351 }
352
353 return meta;
354
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800355out_error:
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300356 vfree(meta->table);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300357 kfree(meta);
Sergey Senozhatskyb8179952015-02-12 15:00:31 -0800358 return NULL;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300359}
360
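/*
 * zram_meta_get()/zram_meta_put() take and drop a reference that keeps the
 * device's meta alive while an I/O request is in flight; the reset path
 * drops the initial reference and waits for the count to reach zero before
 * freeing the meta.
 */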
Minchan Kim08eee692015-02-12 15:00:45 -0800361static inline bool zram_meta_get(struct zram *zram)
362{
363 if (atomic_inc_not_zero(&zram->refcount))
364 return true;
365 return false;
366}
367
368static inline void zram_meta_put(struct zram *zram)
369{
370 atomic_dec(&zram->refcount);
371}
372
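/* Advance the (index, offset) cursor in PAGE_SIZE units past this bvec. */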
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300373static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
374{
375 if (*offset + bvec->bv_len >= PAGE_SIZE)
376 (*index)++;
377 *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
378}
379
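/* Return 1 if the page contains only zero bytes, scanning a long at a time. */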
Nitin Gupta306b0c92009-09-22 10:26:53 +0530380static int page_zero_filled(void *ptr)
381{
382 unsigned int pos;
383 unsigned long *page;
384
385 page = (unsigned long *)ptr;
386
387 for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
388 if (page[pos])
389 return 0;
390 }
391
392 return 1;
393}
394
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300395static void handle_zero_page(struct bio_vec *bvec)
396{
397 struct page *page = bvec->bv_page;
398 void *user_mem;
399
400 user_mem = kmap_atomic(page);
401 if (is_partial_io(bvec))
402 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
403 else
404 clear_page(user_mem);
405 kunmap_atomic(user_mem);
406
407 flush_dcache_page(page);
408}
409
Weijie Yangd2d5e762014-08-06 16:08:31 -0700410
411/*
 412 * To protect concurrent access to the same index entry, the caller
 413 * should hold this table entry's bit_spinlock, which marks the
 414 * entry as being accessed.
415 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530416static void zram_free_page(struct zram *zram, size_t index)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530417{
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900418 struct zram_meta *meta = zram->meta;
419 unsigned long handle = meta->table[index].handle;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530420
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600421 if (unlikely(!handle)) {
Nitin Gupta2e882282010-01-28 21:13:41 +0530422 /*
423 * No memory is allocated for zero filled pages.
424 * Simply clear zero page flag.
425 */
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900426 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
427 zram_clear_flag(meta, index, ZRAM_ZERO);
Sergey Senozhatsky90a78062014-04-07 15:38:03 -0700428 atomic64_dec(&zram->stats.zero_pages);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530429 }
430 return;
431 }
432
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900433 zs_free(meta->mem_pool, handle);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530434
Weijie Yangd2d5e762014-08-06 16:08:31 -0700435 atomic64_sub(zram_get_obj_size(meta, index),
436 &zram->stats.compr_data_size);
Sergey Senozhatsky90a78062014-04-07 15:38:03 -0700437 atomic64_dec(&zram->stats.pages_stored);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530438
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900439 meta->table[index].handle = 0;
Weijie Yangd2d5e762014-08-06 16:08:31 -0700440 zram_set_obj_size(meta, index, 0);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530441}
442
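/*
 * Decompress the object at @index into @mem (a full PAGE_SIZE buffer).
 * Unallocated and ZRAM_ZERO entries yield a cleared page; objects stored
 * uncompressed (PAGE_SIZE sized) are copied as-is. The entry's bit lock is
 * held while the handle is looked up and the object is read.
 */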
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300443static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530444{
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700445 int ret = 0;
Jerome Marchand924bd882011-06-10 15:28:48 +0200446 unsigned char *cmem;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900447 struct zram_meta *meta = zram->meta;
Minchan Kim92967472014-01-30 15:46:03 -0800448 unsigned long handle;
Minchan Kim023b4092014-08-06 16:08:29 -0700449 size_t size;
Minchan Kim92967472014-01-30 15:46:03 -0800450
Weijie Yangd2d5e762014-08-06 16:08:31 -0700451 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Minchan Kim92967472014-01-30 15:46:03 -0800452 handle = meta->table[index].handle;
Weijie Yangd2d5e762014-08-06 16:08:31 -0700453 size = zram_get_obj_size(meta, index);
Jerome Marchand924bd882011-06-10 15:28:48 +0200454
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900455 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
Weijie Yangd2d5e762014-08-06 16:08:31 -0700456 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800457 clear_page(mem);
Jerome Marchand924bd882011-06-10 15:28:48 +0200458 return 0;
459 }
460
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900461 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
Minchan Kim92967472014-01-30 15:46:03 -0800462 if (size == PAGE_SIZE)
Jiang Liu42e99bd2013-06-07 00:07:30 +0800463 copy_page(mem, cmem);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300464 else
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700465 ret = zcomp_decompress(zram->comp, cmem, size, mem);
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900466 zs_unmap_object(meta->mem_pool, handle);
Weijie Yangd2d5e762014-08-06 16:08:31 -0700467 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Jerome Marchand924bd882011-06-10 15:28:48 +0200468
469 /* Should NEVER happen. Return bio error if it does. */
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700470 if (unlikely(ret)) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200471 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
Jerome Marchand924bd882011-06-10 15:28:48 +0200472 return ret;
473 }
474
475 return 0;
476}
477
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300478static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
karam.leeb627cff2014-12-12 16:56:47 -0800479 u32 index, int offset)
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300480{
481 int ret;
482 struct page *page;
483 unsigned char *user_mem, *uncmem = NULL;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900484 struct zram_meta *meta = zram->meta;
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300485 page = bvec->bv_page;
486
Weijie Yangd2d5e762014-08-06 16:08:31 -0700487 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900488 if (unlikely(!meta->table[index].handle) ||
489 zram_test_flag(meta, index, ZRAM_ZERO)) {
Weijie Yangd2d5e762014-08-06 16:08:31 -0700490 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300491 handle_zero_page(bvec);
492 return 0;
493 }
Weijie Yangd2d5e762014-08-06 16:08:31 -0700494 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300495
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300496 if (is_partial_io(bvec))
497 /* Use a temporary buffer to decompress the page */
Minchan Kim7e5a5102013-01-30 11:41:39 +0900498 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
499
500 user_mem = kmap_atomic(page);
501 if (!is_partial_io(bvec))
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300502 uncmem = user_mem;
503
504 if (!uncmem) {
505 pr_info("Unable to allocate temp memory\n");
506 ret = -ENOMEM;
507 goto out_cleanup;
508 }
509
510 ret = zram_decompress_page(zram, uncmem, index);
511 /* Should NEVER happen. Return bio error if it does. */
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700512 if (unlikely(ret))
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300513 goto out_cleanup;
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300514
515 if (is_partial_io(bvec))
516 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
517 bvec->bv_len);
518
519 flush_dcache_page(page);
520 ret = 0;
521out_cleanup:
522 kunmap_atomic(user_mem);
523 if (is_partial_io(bvec))
524 kfree(uncmem);
525 return ret;
526}
527
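/*
 * Raise the max_used_pages watermark to @pages with a cmpxchg loop so that
 * concurrent updaters never move the recorded maximum backwards.
 */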
Minchan Kim461a8ee2014-10-09 15:29:55 -0700528static inline void update_used_max(struct zram *zram,
529 const unsigned long pages)
530{
531 int old_max, cur_max;
532
533 old_max = atomic_long_read(&zram->stats.max_used_pages);
534
535 do {
536 cur_max = old_max;
537 if (pages > cur_max)
538 old_max = atomic_long_cmpxchg(
539 &zram->stats.max_used_pages, cur_max, pages);
540 } while (old_max != cur_max);
541}
542
Jerome Marchand924bd882011-06-10 15:28:48 +0200543static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
544 int offset)
545{
Nitin Gupta397c6062013-01-02 08:53:41 -0800546 int ret = 0;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200547 size_t clen;
Minchan Kimc2344342012-06-08 15:39:25 +0900548 unsigned long handle;
Minchan Kim130f3152012-06-08 15:39:27 +0900549 struct page *page;
Jerome Marchand924bd882011-06-10 15:28:48 +0200550 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900551 struct zram_meta *meta = zram->meta;
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700552 struct zcomp_strm *zstrm;
Minchan Kime46e3312014-01-30 15:46:06 -0800553 bool locked = false;
Minchan Kim461a8ee2014-10-09 15:29:55 -0700554 unsigned long alloced_pages;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200555
556 page = bvec->bv_page;
Jerome Marchand924bd882011-06-10 15:28:48 +0200557 if (is_partial_io(bvec)) {
558 /*
 559 * This is a partial I/O. We need to read the full page
 560 * before writing the changes.
561 */
Minchan Kim7e5a5102013-01-30 11:41:39 +0900562 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
Jerome Marchand924bd882011-06-10 15:28:48 +0200563 if (!uncmem) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200564 ret = -ENOMEM;
565 goto out;
566 }
Sergey Senozhatsky37b51fd2012-10-30 22:40:23 +0300567 ret = zram_decompress_page(zram, uncmem, index);
Nitin Gupta397c6062013-01-02 08:53:41 -0800568 if (ret)
Jerome Marchand924bd882011-06-10 15:28:48 +0200569 goto out;
Jerome Marchand924bd882011-06-10 15:28:48 +0200570 }
571
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700572 zstrm = zcomp_strm_find(zram->comp);
Minchan Kime46e3312014-01-30 15:46:06 -0800573 locked = true;
Cong Wangba82fe22011-11-25 23:14:25 +0800574 user_mem = kmap_atomic(page);
Jerome Marchand924bd882011-06-10 15:28:48 +0200575
Nitin Gupta397c6062013-01-02 08:53:41 -0800576 if (is_partial_io(bvec)) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200577 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
578 bvec->bv_len);
Nitin Gupta397c6062013-01-02 08:53:41 -0800579 kunmap_atomic(user_mem);
580 user_mem = NULL;
581 } else {
Jerome Marchand924bd882011-06-10 15:28:48 +0200582 uncmem = user_mem;
Nitin Gupta397c6062013-01-02 08:53:41 -0800583 }
Jerome Marchand924bd882011-06-10 15:28:48 +0200584
585 if (page_zero_filled(uncmem)) {
Weijie Yangc4065152014-11-13 15:19:05 -0800586 if (user_mem)
587 kunmap_atomic(user_mem);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900588 /* Free memory associated with this sector now. */
Weijie Yangd2d5e762014-08-06 16:08:31 -0700589 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900590 zram_free_page(zram, index);
Minchan Kim92967472014-01-30 15:46:03 -0800591 zram_set_flag(meta, index, ZRAM_ZERO);
Weijie Yangd2d5e762014-08-06 16:08:31 -0700592 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900593
Sergey Senozhatsky90a78062014-04-07 15:38:03 -0700594 atomic64_inc(&zram->stats.zero_pages);
Jerome Marchand924bd882011-06-10 15:28:48 +0200595 ret = 0;
596 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200597 }
598
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700599 ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
Nitin Gupta397c6062013-01-02 08:53:41 -0800600 if (!is_partial_io(bvec)) {
601 kunmap_atomic(user_mem);
602 user_mem = NULL;
603 uncmem = NULL;
604 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200605
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700606 if (unlikely(ret)) {
Jerome Marchand8c921b22011-06-10 15:28:47 +0200607 pr_err("Compression failed! err=%d\n", ret);
Jerome Marchand924bd882011-06-10 15:28:48 +0200608 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200609 }
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700610 src = zstrm->buffer;
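	/*
	 * Poorly compressible page: store it uncompressed (PAGE_SIZE bytes).
	 * For a partial write the data comes from the rebuilt uncmem buffer.
	 */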
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700611 if (unlikely(clen > max_zpage_size)) {
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700612 clen = PAGE_SIZE;
Nitin Gupta397c6062013-01-02 08:53:41 -0800613 if (is_partial_io(bvec))
614 src = uncmem;
Nitin Guptac8f2f0d2012-10-10 17:42:18 -0700615 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200616
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900617 handle = zs_malloc(meta->mem_pool, clen);
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600618 if (!handle) {
Marlies Ruck596b3dd2013-05-16 14:30:39 -0400619 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
620 index, clen);
Jerome Marchand924bd882011-06-10 15:28:48 +0200621 ret = -ENOMEM;
622 goto out;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200623 }
Minchan Kim9ada9da2014-10-09 15:29:53 -0700624
Minchan Kim461a8ee2014-10-09 15:29:55 -0700625 alloced_pages = zs_get_total_pages(meta->mem_pool);
626 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
Minchan Kim9ada9da2014-10-09 15:29:53 -0700627 zs_free(meta->mem_pool, handle);
628 ret = -ENOMEM;
629 goto out;
630 }
631
Minchan Kim461a8ee2014-10-09 15:29:55 -0700632 update_used_max(zram, alloced_pages);
633
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900634 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200635
Jiang Liu42e99bd2013-06-07 00:07:30 +0800636 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
Nitin Gupta397c6062013-01-02 08:53:41 -0800637 src = kmap_atomic(page);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800638 copy_page(cmem, src);
Nitin Gupta397c6062013-01-02 08:53:41 -0800639 kunmap_atomic(src);
Jiang Liu42e99bd2013-06-07 00:07:30 +0800640 } else {
641 memcpy(cmem, src, clen);
642 }
Jerome Marchand8c921b22011-06-10 15:28:47 +0200643
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700644 zcomp_strm_release(zram->comp, zstrm);
645 locked = false;
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900646 zs_unmap_object(meta->mem_pool, handle);
Nitin Guptafd1a30d2012-01-09 16:51:59 -0600647
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900648 /*
649 * Free memory associated with this sector
650 * before overwriting unused sectors.
651 */
Weijie Yangd2d5e762014-08-06 16:08:31 -0700652 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Sunghan Suhf40ac2a2013-07-03 20:10:05 +0900653 zram_free_page(zram, index);
654
Minchan Kim8b3cc3e2013-02-06 08:48:53 +0900655 meta->table[index].handle = handle;
Weijie Yangd2d5e762014-08-06 16:08:31 -0700656 zram_set_obj_size(meta, index, clen);
657 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Jerome Marchand8c921b22011-06-10 15:28:47 +0200658
659 /* Update stats */
Sergey Senozhatsky90a78062014-04-07 15:38:03 -0700660 atomic64_add(clen, &zram->stats.compr_data_size);
661 atomic64_inc(&zram->stats.pages_stored);
Jerome Marchand924bd882011-06-10 15:28:48 +0200662out:
Minchan Kime46e3312014-01-30 15:46:06 -0800663 if (locked)
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700664 zcomp_strm_release(zram->comp, zstrm);
Nitin Gupta397c6062013-01-02 08:53:41 -0800665 if (is_partial_io(bvec))
666 kfree(uncmem);
Jerome Marchand924bd882011-06-10 15:28:48 +0200667 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200668}
669
670static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
karam.leeb627cff2014-12-12 16:56:47 -0800671 int offset, int rw)
Jerome Marchand8c921b22011-06-10 15:28:47 +0200672{
Jerome Marchandc5bde232011-06-10 15:28:49 +0200673 int ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200674
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700675 if (rw == READ) {
676 atomic64_inc(&zram->stats.num_reads);
karam.leeb627cff2014-12-12 16:56:47 -0800677 ret = zram_bvec_read(zram, bvec, index, offset);
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700678 } else {
679 atomic64_inc(&zram->stats.num_writes);
Jerome Marchandc5bde232011-06-10 15:28:49 +0200680 ret = zram_bvec_write(zram, bvec, index, offset);
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700681 }
Jerome Marchandc5bde232011-06-10 15:28:49 +0200682
Chao Yu0cf1e9d2014-08-29 15:18:37 -0700683 if (unlikely(ret)) {
684 if (rw == READ)
685 atomic64_inc(&zram->stats.failed_reads);
686 else
687 atomic64_inc(&zram->stats.failed_writes);
688 }
689
Jerome Marchandc5bde232011-06-10 15:28:49 +0200690 return ret;
Jerome Marchand924bd882011-06-10 15:28:48 +0200691}
692
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700693/*
694 * zram_bio_discard - handler on discard request
695 * @index: physical block index in PAGE_SIZE units
696 * @offset: byte offset within physical block
697 */
698static void zram_bio_discard(struct zram *zram, u32 index,
699 int offset, struct bio *bio)
700{
701 size_t n = bio->bi_iter.bi_size;
Weijie Yangd2d5e762014-08-06 16:08:31 -0700702 struct zram_meta *meta = zram->meta;
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700703
704 /*
 705 * zram manages data in physical block size units. Because the logical
 706 * block size isn't identical to the physical block size on some
 707 * architectures, we could get a discard request pointing to a specific
 708 * offset within a certain physical block. Although we could handle such
 709 * a request by reading that physical block, decompressing it, partially
 710 * zeroing it, and then re-compressing and re-storing it, this isn't
 711 * reasonable because our intent with a discard request is to save
 712 * memory. So skipping this logical block is appropriate here.
713 */
714 if (offset) {
Weijie Yang38515c72014-06-04 16:11:06 -0700715 if (n <= (PAGE_SIZE - offset))
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700716 return;
717
Weijie Yang38515c72014-06-04 16:11:06 -0700718 n -= (PAGE_SIZE - offset);
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700719 index++;
720 }
721
722 while (n >= PAGE_SIZE) {
Weijie Yangd2d5e762014-08-06 16:08:31 -0700723 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700724 zram_free_page(zram, index);
Weijie Yangd2d5e762014-08-06 16:08:31 -0700725 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Sergey Senozhatsky015254d2014-10-09 15:29:57 -0700726 atomic64_inc(&zram->stats.notify_free);
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700727 index++;
728 n -= PAGE_SIZE;
729 }
730}
731
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -0800732static void zram_reset_device(struct zram *zram)
Jerome Marchand924bd882011-06-10 15:28:48 +0200733{
Minchan Kim08eee692015-02-12 15:00:45 -0800734 struct zram_meta *meta;
735 struct zcomp *comp;
736 u64 disksize;
737
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300738 down_write(&zram->init_lock);
Minchan Kim9ada9da2014-10-09 15:29:53 -0700739
740 zram->limit_pages = 0;
741
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -0700742 if (!init_done(zram)) {
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300743 up_write(&zram->init_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300744 return;
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300745 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300746
Minchan Kim08eee692015-02-12 15:00:45 -0800747 meta = zram->meta;
748 comp = zram->comp;
749 disksize = zram->disksize;
750 /*
751 * Refcount will go down to 0 eventually and r/w handler
 752 * cannot handle further I/O, so it will bail out by
 753 * checking zram_meta_get().
754 */
755 zram_meta_put(zram);
756 /*
757 * We want to free zram_meta in process context to avoid
758 * deadlock between reclaim path and any other locks.
759 */
760 wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
761
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300762 /* Reset stats */
763 memset(&zram->stats, 0, sizeof(zram->stats));
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300764 zram->disksize = 0;
Minchan Kim08eee692015-02-12 15:00:45 -0800765 zram->max_comp_streams = 1;
Sergey Senozhatskya096caf2015-02-12 15:00:39 -0800766 set_capacity(zram->disk, 0);
767
Sergey Senozhatsky644d4782013-06-26 15:28:39 +0300768 up_write(&zram->init_lock);
Minchan Kim08eee692015-02-12 15:00:45 -0800769 /* All in-flight I/O on every CPU has completed, so it is safe to free */
770 zram_meta_free(meta, disksize);
771 zcomp_destroy(comp);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300772}
773
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300774static ssize_t disksize_store(struct device *dev,
775 struct device_attribute *attr, const char *buf, size_t len)
776{
777 u64 disksize;
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -0700778 struct zcomp *comp;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300779 struct zram_meta *meta;
780 struct zram *zram = dev_to_zram(dev);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -0700781 int err;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300782
783 disksize = memparse(buf, NULL);
784 if (!disksize)
785 return -EINVAL;
786
787 disksize = PAGE_ALIGN(disksize);
Ganesh Mahendran3eba0c62015-02-12 15:00:51 -0800788 meta = zram_meta_alloc(zram->disk->first_minor, disksize);
Minchan Kimdb5d7112014-03-03 15:38:34 -0800789 if (!meta)
790 return -ENOMEM;
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -0700791
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -0700792 comp = zcomp_create(zram->compressor, zram->max_comp_streams);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -0700793 if (IS_ERR(comp)) {
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700794 pr_info("Cannot initialise %s compressing backend\n",
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -0700795 zram->compressor);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -0700796 err = PTR_ERR(comp);
797 goto out_free_meta;
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -0700798 }
799
800 down_write(&zram->init_lock);
801 if (init_done(zram)) {
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -0700802 pr_info("Cannot change disksize for initialized device\n");
803 err = -EBUSY;
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -0700804 goto out_destroy_comp;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300805 }
806
Minchan Kim08eee692015-02-12 15:00:45 -0800807 init_waitqueue_head(&zram->io_done);
808 atomic_set(&zram->refcount, 1);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -0700809 zram->meta = meta;
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -0700810 zram->comp = comp;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300811 zram->disksize = disksize;
812 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300813 up_write(&zram->init_lock);
Minchan Kimb4c5c602014-07-23 14:00:04 -0700814
815 /*
816 * Revalidate disk out of the init_lock to avoid lockdep splat.
817 * It's okay because disk's capacity is protected by init_lock
818 * so that revalidate_disk always sees up-to-date capacity.
819 */
820 revalidate_disk(zram->disk);
821
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300822 return len;
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700823
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -0700824out_destroy_comp:
825 up_write(&zram->init_lock);
826 zcomp_destroy(comp);
827out_free_meta:
Ganesh Mahendran1fec1172015-02-12 15:00:33 -0800828 zram_meta_free(meta, disksize);
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -0700829 return err;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300830}
831
832static ssize_t reset_store(struct device *dev,
833 struct device_attribute *attr, const char *buf, size_t len)
834{
835 int ret;
836 unsigned short do_reset;
837 struct zram *zram;
838 struct block_device *bdev;
839
840 zram = dev_to_zram(dev);
841 bdev = bdget_disk(zram->disk, 0);
842
Rashika Kheria46a51c82013-10-30 18:36:32 +0530843 if (!bdev)
844 return -ENOMEM;
845
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -0800846 mutex_lock(&bdev->bd_mutex);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300847 /* Do not reset an active device! */
Minchan Kim2b269ce2015-02-12 15:00:42 -0800848 if (bdev->bd_openers) {
Rashika Kheria1b672222013-11-10 22:13:53 +0530849 ret = -EBUSY;
850 goto out;
851 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300852
853 ret = kstrtou16(buf, 10, &do_reset);
854 if (ret)
Rashika Kheria1b672222013-11-10 22:13:53 +0530855 goto out;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300856
Rashika Kheria1b672222013-11-10 22:13:53 +0530857 if (!do_reset) {
858 ret = -EINVAL;
859 goto out;
860 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300861
862 /* Make sure all pending I/O is finished */
Rashika Kheria46a51c82013-10-30 18:36:32 +0530863 fsync_bdev(bdev);
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -0800864 zram_reset_device(zram);
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -0800865
866 mutex_unlock(&bdev->bd_mutex);
867 revalidate_disk(zram->disk);
Rashika Kheria1b672222013-11-10 22:13:53 +0530868 bdput(bdev);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300869
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +0300870 return len;
Rashika Kheria1b672222013-11-10 22:13:53 +0530871
872out:
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -0800873 mutex_unlock(&bdev->bd_mutex);
Rashika Kheria1b672222013-11-10 22:13:53 +0530874 bdput(bdev);
875 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +0200876}
877
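/*
 * Walk the bio segment by segment, splitting any bvec that crosses a
 * PAGE_SIZE boundary so that zram_bvec_rw() only ever touches one
 * zram page at a time.
 */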
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700878static void __zram_make_request(struct zram *zram, struct bio *bio)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530879{
karam.leeb627cff2014-12-12 16:56:47 -0800880 int offset, rw;
Nitin Guptaa1dd52a2010-06-01 13:31:23 +0530881 u32 index;
Kent Overstreet79886132013-11-23 17:19:00 -0800882 struct bio_vec bvec;
883 struct bvec_iter iter;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530884
Kent Overstreet4f024f32013-10-11 15:44:27 -0700885 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
886 offset = (bio->bi_iter.bi_sector &
887 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530888
Joonsoo Kimf4659d82014-04-07 15:38:24 -0700889 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
890 zram_bio_discard(zram, index, offset, bio);
891 bio_endio(bio, 0);
892 return;
893 }
894
karam.leeb627cff2014-12-12 16:56:47 -0800895 rw = bio_data_dir(bio);
Kent Overstreet79886132013-11-23 17:19:00 -0800896 bio_for_each_segment(bvec, bio, iter) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200897 int max_transfer_size = PAGE_SIZE - offset;
898
Kent Overstreet79886132013-11-23 17:19:00 -0800899 if (bvec.bv_len > max_transfer_size) {
Jerome Marchand924bd882011-06-10 15:28:48 +0200900 /*
901 * zram_bvec_rw() can only make operation on a single
902 * zram page. Split the bio vector.
903 */
904 struct bio_vec bv;
905
Kent Overstreet79886132013-11-23 17:19:00 -0800906 bv.bv_page = bvec.bv_page;
Jerome Marchand924bd882011-06-10 15:28:48 +0200907 bv.bv_len = max_transfer_size;
Kent Overstreet79886132013-11-23 17:19:00 -0800908 bv.bv_offset = bvec.bv_offset;
Jerome Marchand924bd882011-06-10 15:28:48 +0200909
karam.leeb627cff2014-12-12 16:56:47 -0800910 if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200911 goto out;
912
Kent Overstreet79886132013-11-23 17:19:00 -0800913 bv.bv_len = bvec.bv_len - max_transfer_size;
Jerome Marchand924bd882011-06-10 15:28:48 +0200914 bv.bv_offset += max_transfer_size;
karam.leeb627cff2014-12-12 16:56:47 -0800915 if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200916 goto out;
917 } else
karam.leeb627cff2014-12-12 16:56:47 -0800918 if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
Jerome Marchand924bd882011-06-10 15:28:48 +0200919 goto out;
920
Kent Overstreet79886132013-11-23 17:19:00 -0800921 update_position(&index, &offset, &bvec);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +0530922 }
Nitin Gupta306b0c92009-09-22 10:26:53 +0530923
924 set_bit(BIO_UPTODATE, &bio->bi_flags);
925 bio_endio(bio, 0);
Nitin Gupta7d7854b2011-01-22 07:36:15 -0500926 return;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530927
928out:
Nitin Gupta306b0c92009-09-22 10:26:53 +0530929 bio_io_error(bio);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530930}
931
Nitin Gupta306b0c92009-09-22 10:26:53 +0530932/*
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530933 * Handler function for all zram I/O requests.
Nitin Gupta306b0c92009-09-22 10:26:53 +0530934 */
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +0200935static void zram_make_request(struct request_queue *queue, struct bio *bio)
Nitin Gupta306b0c92009-09-22 10:26:53 +0530936{
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530937 struct zram *zram = queue->queuedata;
Nitin Gupta306b0c92009-09-22 10:26:53 +0530938
Minchan Kim08eee692015-02-12 15:00:45 -0800939 if (unlikely(!zram_meta_get(zram)))
Minchan Kim3de738c2013-01-30 11:41:41 +0900940 goto error;
Jerome Marchand0900bea2011-09-06 15:02:11 +0200941
karam.lee54850e72014-12-12 16:56:50 -0800942 if (!valid_io_request(zram, bio->bi_iter.bi_sector,
943 bio->bi_iter.bi_size)) {
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800944 atomic64_inc(&zram->stats.invalid_io);
Minchan Kim08eee692015-02-12 15:00:45 -0800945 goto put_zram;
Jerome Marchand6642a672011-02-17 17:11:49 +0100946 }
947
Sergey Senozhatskybe257c62014-04-07 15:38:01 -0700948 __zram_make_request(zram, bio);
Minchan Kim08eee692015-02-12 15:00:45 -0800949 zram_meta_put(zram);
Linus Torvaldsb4fdcb02011-11-04 17:06:58 -0700950 return;
Minchan Kim08eee692015-02-12 15:00:45 -0800951put_zram:
952 zram_meta_put(zram);
Jerome Marchand0900bea2011-09-06 15:02:11 +0200953error:
954 bio_io_error(bio);
Nitin Gupta306b0c92009-09-22 10:26:53 +0530955}
956
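/*
 * Called via the block_device_operations swap_slot_free_notify hook when
 * swap frees a slot, so the stored object can be released immediately.
 */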
Nitin Gupta2ccbec02011-09-09 19:01:00 -0400957static void zram_slot_free_notify(struct block_device *bdev,
958 unsigned long index)
Nitin Gupta107c1612010-05-17 11:02:44 +0530959{
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530960 struct zram *zram;
Minchan Kimf614a9f2014-01-30 15:46:04 -0800961 struct zram_meta *meta;
Nitin Gupta107c1612010-05-17 11:02:44 +0530962
Nitin Guptaf1e3cff2010-06-01 13:31:25 +0530963 zram = bdev->bd_disk->private_data;
Minchan Kimf614a9f2014-01-30 15:46:04 -0800964 meta = zram->meta;
965
Weijie Yangd2d5e762014-08-06 16:08:31 -0700966 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
Minchan Kimf614a9f2014-01-30 15:46:04 -0800967 zram_free_page(zram, index);
Weijie Yangd2d5e762014-08-06 16:08:31 -0700968 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
Jiang Liuda5cc7d2013-06-07 00:07:31 +0800969 atomic64_inc(&zram->stats.notify_free);
Nitin Gupta107c1612010-05-17 11:02:44 +0530970}
971
karam.lee8c7f0102014-12-12 16:56:53 -0800972static int zram_rw_page(struct block_device *bdev, sector_t sector,
973 struct page *page, int rw)
974{
Minchan Kim08eee692015-02-12 15:00:45 -0800975 int offset, err = -EIO;
karam.lee8c7f0102014-12-12 16:56:53 -0800976 u32 index;
977 struct zram *zram;
978 struct bio_vec bv;
979
980 zram = bdev->bd_disk->private_data;
Minchan Kim08eee692015-02-12 15:00:45 -0800981 if (unlikely(!zram_meta_get(zram)))
982 goto out;
983
karam.lee8c7f0102014-12-12 16:56:53 -0800984 if (!valid_io_request(zram, sector, PAGE_SIZE)) {
985 atomic64_inc(&zram->stats.invalid_io);
Minchan Kim08eee692015-02-12 15:00:45 -0800986 err = -EINVAL;
987 goto put_zram;
karam.lee8c7f0102014-12-12 16:56:53 -0800988 }
989
990 index = sector >> SECTORS_PER_PAGE_SHIFT;
991 offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
992
993 bv.bv_page = page;
994 bv.bv_len = PAGE_SIZE;
995 bv.bv_offset = 0;
996
997 err = zram_bvec_rw(zram, &bv, index, offset, rw);
Minchan Kim08eee692015-02-12 15:00:45 -0800998put_zram:
999 zram_meta_put(zram);
1000out:
karam.lee8c7f0102014-12-12 16:56:53 -08001001 /*
 1002 * If the I/O fails, just return an error (i.e., non-zero) without
 1003 * calling page_endio.
 1004 * The upper callers of rw_page (e.g., swap_readpage, __swap_writepage)
 1005 * will then resubmit the I/O as a bio request, and bio->bi_end_io
 1006 * handles the error (e.g., SetPageError, set_page_dirty and other
 1007 * cleanup work).
1008 */
1009 if (err == 0)
1010 page_endio(page, rw, 0);
1011 return err;
1012}
1013
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301014static const struct block_device_operations zram_devops = {
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301015 .swap_slot_free_notify = zram_slot_free_notify,
karam.lee8c7f0102014-12-12 16:56:53 -08001016 .rw_page = zram_rw_page,
Nitin Gupta107c1612010-05-17 11:02:44 +05301017 .owner = THIS_MODULE
Nitin Gupta306b0c92009-09-22 10:26:53 +05301018};
1019
Ganesh Mahendran083914e2014-12-12 16:57:13 -08001020static DEVICE_ATTR_RW(disksize);
1021static DEVICE_ATTR_RO(initstate);
1022static DEVICE_ATTR_WO(reset);
1023static DEVICE_ATTR_RO(orig_data_size);
1024static DEVICE_ATTR_RO(mem_used_total);
1025static DEVICE_ATTR_RW(mem_limit);
1026static DEVICE_ATTR_RW(mem_used_max);
1027static DEVICE_ATTR_RW(max_comp_streams);
1028static DEVICE_ATTR_RW(comp_algorithm);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001029
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -07001030ZRAM_ATTR_RO(num_reads);
1031ZRAM_ATTR_RO(num_writes);
Sergey Senozhatsky64447242014-04-07 15:38:05 -07001032ZRAM_ATTR_RO(failed_reads);
1033ZRAM_ATTR_RO(failed_writes);
Sergey Senozhatskya68eb3b2014-04-07 15:38:04 -07001034ZRAM_ATTR_RO(invalid_io);
1035ZRAM_ATTR_RO(notify_free);
1036ZRAM_ATTR_RO(zero_pages);
1037ZRAM_ATTR_RO(compr_data_size);
1038
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001039static struct attribute *zram_disk_attrs[] = {
1040 &dev_attr_disksize.attr,
1041 &dev_attr_initstate.attr,
1042 &dev_attr_reset.attr,
1043 &dev_attr_num_reads.attr,
1044 &dev_attr_num_writes.attr,
Sergey Senozhatsky64447242014-04-07 15:38:05 -07001045 &dev_attr_failed_reads.attr,
1046 &dev_attr_failed_writes.attr,
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001047 &dev_attr_invalid_io.attr,
1048 &dev_attr_notify_free.attr,
1049 &dev_attr_zero_pages.attr,
1050 &dev_attr_orig_data_size.attr,
1051 &dev_attr_compr_data_size.attr,
1052 &dev_attr_mem_used_total.attr,
Minchan Kim9ada9da2014-10-09 15:29:53 -07001053 &dev_attr_mem_limit.attr,
Minchan Kim461a8ee2014-10-09 15:29:55 -07001054 &dev_attr_mem_used_max.attr,
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -07001055 &dev_attr_max_comp_streams.attr,
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001056 &dev_attr_comp_algorithm.attr,
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001057 NULL,
1058};
1059
1060static struct attribute_group zram_disk_attr_group = {
1061 .attrs = zram_disk_attrs,
1062};
1063
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301064static int create_device(struct zram *zram, int device_id)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301065{
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001066 struct request_queue *queue;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001067 int ret = -ENOMEM;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301068
Jerome Marchand0900bea2011-09-06 15:02:11 +02001069 init_rwsem(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301070
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001071 queue = blk_alloc_queue(GFP_KERNEL);
1072 if (!queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +05301073 pr_err("Error allocating disk queue for device %d\n",
1074 device_id);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301075 goto out;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301076 }
1077
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001078 blk_queue_make_request(queue, zram_make_request);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301079
1080 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301081 zram->disk = alloc_disk(1);
1082 if (!zram->disk) {
Sam Hansen94b84352012-06-07 16:03:47 -07001083 pr_warn("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +05301084 device_id);
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001085 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301086 }
1087
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301088 zram->disk->major = zram_major;
1089 zram->disk->first_minor = device_id;
1090 zram->disk->fops = &zram_devops;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001091 zram->disk->queue = queue;
1092 zram->disk->queue->queuedata = zram;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301093 zram->disk->private_data = zram;
1094 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301095
Nitin Gupta33863c22010-08-09 22:56:47 +05301096 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301097 set_capacity(zram->disk, 0);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001098 /* zram devices sort of resemble non-rotational disks */
1099 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
Mike Snitzerb277da02014-10-04 10:55:32 -06001100 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +05301101 /*
 1102 * Ensure that we always get PAGE_SIZE-aligned
 1103 * and n*PAGE_SIZE-sized I/O requests.
1104 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301105 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -06001106 blk_queue_logical_block_size(zram->disk->queue,
1107 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301108 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1109 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001110 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
1111 zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
1112 /*
 1113 * zram_bio_discard() will clear all logical blocks if the logical block
 1114 * size is identical to the physical block size (PAGE_SIZE). But if they
 1115 * differ, we skip discarding the parts of logical blocks that fall in
 1116 * the portion of the request range which isn't aligned to the physical
 1117 * block size. So we can't guarantee that all discarded logical blocks
 1118 * are zeroed.
1119 */
1120 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1121 zram->disk->queue->limits.discard_zeroes_data = 1;
1122 else
1123 zram->disk->queue->limits.discard_zeroes_data = 0;
1124 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +05301125
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301126 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301127
Nitin Gupta33863c22010-08-09 22:56:47 +05301128 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1129 &zram_disk_attr_group);
1130 if (ret < 0) {
Sam Hansen94b84352012-06-07 16:03:47 -07001131 pr_warn("Error creating sysfs group");
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001132 goto out_free_disk;
Nitin Gupta33863c22010-08-09 22:56:47 +05301133 }
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001134 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -07001135 zram->meta = NULL;
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -07001136 zram->max_comp_streams = 1;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001137 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301138
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001139out_free_disk:
1140 del_gendisk(zram->disk);
1141 put_disk(zram->disk);
1142out_free_queue:
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001143 blk_cleanup_queue(queue);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301144out:
1145 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301146}
1147
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001148static void destroy_devices(unsigned int nr)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301149{
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001150 struct zram *zram;
1151 unsigned int i;
Nitin Gupta33863c22010-08-09 22:56:47 +05301152
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001153 for (i = 0; i < nr; i++) {
1154 zram = &zram_devices[i];
1155 /*
1156 * Remove sysfs first, so no one will perform a disksize
1157 * store while we destroy the devices
1158 */
1159 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1160 &zram_disk_attr_group);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301161
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001162 zram_reset_device(zram);
1163
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001164 blk_cleanup_queue(zram->disk->queue);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001165 del_gendisk(zram->disk);
1166 put_disk(zram->disk);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001167 }
1168
1169 kfree(zram_devices);
1170 unregister_blkdev(zram_major, "zram");
1171 pr_info("Destroyed %u device(s)\n", nr);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301172}
1173
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301174static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301175{
Nitin Guptade1a21a2010-01-28 21:13:40 +05301176 int ret, dev_id;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301177
Nitin Gupta5fa5a902012-02-12 23:04:45 -05001178 if (num_devices > max_num_devices) {
Sam Hansen94b84352012-06-07 16:03:47 -07001179 pr_warn("Invalid value for num_devices: %u\n",
Nitin Gupta5fa5a902012-02-12 23:04:45 -05001180 num_devices);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001181 return -EINVAL;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301182 }
1183
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301184 zram_major = register_blkdev(0, "zram");
1185 if (zram_major <= 0) {
Sam Hansen94b84352012-06-07 16:03:47 -07001186 pr_warn("Unable to get major number\n");
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001187 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301188 }
1189
Nitin Gupta306b0c92009-09-22 10:26:53 +05301190 /* Allocate the device array and initialize each one */
Nitin Gupta5fa5a902012-02-12 23:04:45 -05001191 zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
Noah Watkins43801f62011-07-20 17:05:57 -06001192 if (!zram_devices) {
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001193 unregister_blkdev(zram_major, "zram");
1194 return -ENOMEM;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301195 }
Nitin Gupta306b0c92009-09-22 10:26:53 +05301196
Nitin Gupta5fa5a902012-02-12 23:04:45 -05001197 for (dev_id = 0; dev_id < num_devices; dev_id++) {
Noah Watkins43801f62011-07-20 17:05:57 -06001198 ret = create_device(&zram_devices[dev_id], dev_id);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301199 if (ret)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001200 goto out_error;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301201 }
1202
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001203 pr_info("Created %u device(s)\n", num_devices);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301204 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301205
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001206out_error:
1207 destroy_devices(dev_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301208 return ret;
1209}
1210
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301211static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301212{
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001213 destroy_devices(num_devices);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301214}
1215
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301216module_init(zram_init);
1217module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301218
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001219module_param(num_devices, uint, 0);
1220MODULE_PARM_DESC(num_devices, "Number of zram devices");
1221
Nitin Gupta306b0c92009-09-22 10:26:53 +05301222MODULE_LICENSE("Dual BSD/GPL");
1223MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301224MODULE_DESCRIPTION("Compressed RAM Block Device");