/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
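
/*
 * As an illustration, ZRAM_ATTR_RO(num_reads) further below expands
 * (roughly) to:
 *
 *	static ssize_t num_reads_show(struct device *d,
 *				struct device_attribute *attr, char *b)
 *	{
 *		struct zram *zram = dev_to_zram(d);
 *		return scnprintf(b, PAGE_SIZE, "%llu\n",
 *			(u64)atomic64_read(&zram->stats.num_reads));
 *	}
 *	static DEVICE_ATTR_RO(num_reads);
 *
 * i.e. one read-only sysfs attribute per atomic64 counter in zram->stats.
 */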

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
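
/*
 * memparse() above accepts the usual size suffixes (e.g. K, M, G), so a
 * sketch of typical usage is:
 *
 *	echo 1G > /sys/block/zram0/mem_limit
 *
 * The limit is rounded up to whole pages via PAGE_ALIGN, and a value of 0
 * (the default) disables the limit check in zram_bvec_write().
 */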

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need to hold the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

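/*
 * meta->table[index].value packs two things (see zram_drv.h): the low
 * ZRAM_FLAG_SHIFT bits hold the compressed object size, while the
 * zram_pageflags bits (ZRAM_ZERO, the ZRAM_ACCESS bit-spinlock bit, ...)
 * sit above the size field, which is why the accessors below mask and
 * shift by ZRAM_FLAG_SHIFT.
 */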
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

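/*
 * Advance the (page index, byte offset) cursor by one bio_vec: the offset
 * wraps modulo PAGE_SIZE and the index moves to the next zram page when
 * the segment reaches or crosses a page boundary.
 */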
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

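/* Scan the page word by word; return 1 only if every word is zero. */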
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

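/*
 * Racelessly raise stats.max_used_pages to at least @pages: re-read and
 * retry the cmpxchg until either the stored maximum already covers @pages
 * or our value has been installed.
 */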
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
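	/*
	 * Poorly compressible page: store it uncompressed by bumping clen to
	 * PAGE_SIZE. For a partial write the private uncmem buffer is the
	 * source; for a full-page write the page itself is copied below.
	 */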
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	int ret;

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
695 * reading that physiclal block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;
	zram_meta_free(zram->meta, zram->disksize);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);
}

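/*
 * Allocate the metadata table and the compression backend outside of
 * init_lock, then take the lock only to publish them; a device that is
 * already initialized rejects the change with -EBUSY.
 */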
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}

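/*
 * Walk the bio segment by segment; a segment that straddles a zram page
 * boundary is split into two zram_bvec_rw() calls in the loop below.
 */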
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

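/*
 * Called when a swap slot backed by this device is freed: drop the
 * compressed copy right away instead of waiting for the sector to be
 * overwritten, and account it in notify_free.
 */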
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

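/*
 * Bio-less single-page read/write entry point, wired up below as
 * block_device_operations->rw_page.
 */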
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		return -EINVAL;
	}

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram))) {
		err = -EIO;
		goto out_unlock;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
out_unlock:
	up_read(&zram->init_lock);
	/*
	 * If the I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. The upper layers of rw_page (e.g.
	 * swap_readpage, __swap_writepage) will then resubmit the I/O as a
	 * bio request, and bio->bi_end_io handles the error (e.g.
	 * SetPageError, set_page_dirty and other cleanup).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
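
/*
 * Typical userspace sequence against the attributes above (a sketch; the
 * device node name and sizes are examples):
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * comp_algorithm must be written before disksize, since both
 * comp_algorithm_store() and disksize_store() return -EBUSY once the
 * device is initialized.
 */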

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_devices(unsigned int nr)
{
	struct zram *zram;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		zram = &zram_devices[i];
		/*
		 * Remove sysfs first, so no one will perform a disksize
		 * store while we destroy the devices
		 */
		sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);

		zram_reset_device(zram);

		del_gendisk(zram->disk);
		put_disk(zram->disk);

		blk_cleanup_queue(zram->queue);
	}

	kfree(zram_devices);
	unregister_blkdev(zram_major, "zram");
	pr_info("Destroyed %u device(s)\n", nr);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		return -EINVAL;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		return -EBUSY;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		unregister_blkdev(zram_major, "zram");
		return -ENOMEM;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto out_error;
	}

	pr_info("Created %u device(s)\n", num_devices);
	return 0;

out_error:
	destroy_devices(dev_id);
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices(num_devices);
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");