/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);


static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

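/*
 * ZRAM_LOCK is a bit spinlock kept in each slot's flags word.  Holding it
 * serializes access to that slot's handle, element and size/flag bits (see
 * the "flag operations require table entry bit_spin_lock() being held"
 * comment below).
 */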
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
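
/*
 * Layout of the per-slot flags word, as implied by the two helpers above:
 * the low ZRAM_FLAG_SHIFT bits hold the compressed object size, and the
 * bits above them hold the zram_pageflags (ZRAM_LOCK, ZRAM_SAME, ZRAM_WB, ...).
 */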

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
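
/*
 * The loop above is a lock-free maximum update: the cmpxchg succeeds only if
 * max_used_pages still holds the value we read; otherwise we re-read and
 * retry, so a concurrent updater can never overwrite a larger value.
 */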

static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
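
/*
 * Example: a page of all zeroes (the common case) makes page_same_filled()
 * return true with *element == 0, so no zsmalloc memory is allocated for it;
 * the slot is only flagged ZRAM_SAME and the read path recreates the page
 * with zram_fill_page().
 */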

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
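
/*
 * Usage sketch (paths assume the usual sysfs layout, e.g. zram0):
 *   echo 1G > /sys/block/zram0/mem_limit     - memparse() accepts K/M/G suffixes
 *   echo 0  > /sys/block/zram0/mem_used_max  - only "0" is accepted; it resets
 *                                              the watermark to current usage
 */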

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;
	char mode_buf[8];
	ssize_t sz;

	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
	if (sz <= 0)
		return -EINVAL;

	/* ignore trailing new line */
	if (mode_buf[sz - 1] == '\n')
		mode_buf[sz - 1] = 0x00;

	if (strcmp(mode_buf, "all"))
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
			zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}

	up_read(&zram->init_lock);

	return len;
}
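
/*
 * Usage sketch (assuming the usual sysfs layout): "echo all >
 * /sys/block/zram0/idle" marks every allocated slot that is not under
 * writeback as ZRAM_IDLE; a later "echo idle > /sys/block/zram0/writeback"
 * (see writeback_store() below) then writes those slots to the backing
 * device.
 */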

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
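
/*
 * Note on units: bd_wb_limit is accounted in 4K-sized chunks; the writeback
 * loop below decrements it by (1UL << (PAGE_SHIFT - 12)) per page written,
 * and bd_stat_show() scales its counters with the same FOUR_K() factor.
 */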

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram->backing_dev) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);
	gfp_t kmalloc_flags;

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Support only block device at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0) {
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	kmalloc_flags = GFP_KERNEL | __GFP_ZERO;
	if (bitmap_sz > PAGE_SIZE)
		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;

	bitmap = kmalloc_node(bitmap_sz, kmalloc_flags, NUMA_NO_NODE);
	if (!bitmap && bitmap_sz > PAGE_SIZE)
		bitmap = vzalloc(bitmap_sz);

	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
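
/*
 * Usage sketch (assuming the usual sysfs layout): before the device is
 * initialized, "echo /dev/<partition> > /sys/block/zram0/backing_dev"
 * attaches a block device as writeback storage.  Writing to an already
 * initialized device fails with -EBUSY and non-block files are rejected
 * with -ENOTBLK.
 */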

static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip the 0 bit to avoid confusion with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio->bi_bdev = zram->bdev;
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}

#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index;
	struct bio bio;
	struct page *page;
	ssize_t ret, sz;
	char mode_buf[8];
	int mode = -1;
	unsigned long blk_idx = 0;

	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
	if (sz <= 0)
		return -EINVAL;

	/* ignore trailing newline */
	if (mode_buf[sz - 1] == '\n')
		mode_buf[sz - 1] = 0x00;

	if (!strcmp(mode_buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (!strcmp(mode_buf, "huge"))
		mode = HUGE_WRITEBACK;

	if (mode == -1)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (index = 0; index < nr_pages; index++) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode == IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode == HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the duty of the caller.
		 * IOW, zram_free_page never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed for hugepage writeback racing */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio);

		bio.bi_max_vecs = 1;
		bio.bi_io_vec = &bvec;
		bio.bi_bdev = zram->bdev;

		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio_set_op_attrs(&bio, REQ_OP_WRITE, REQ_SYNC);
		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: A single page IO would be inefficient for write
		 * but it would not be bad as a starter.
		 */
		ret = submit_bio_wait(&bio);
		if (ret) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock, so we need to check whether the
		 * slot was changed. If the slot was freed, we can catch that
		 * easily via zram_allocated.
		 * A subtle case is the slot being freed/reallocated/marked as
		 * ZRAM_IDLE again. To close the race, idle_store doesn't
		 * mark ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	ret = len;
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}
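
/*
 * Usage sketch (assuming CONFIG_ZRAM_WRITEBACK and the usual sysfs layout):
 *   echo huge > /sys/block/zram0/writeback   - write out incompressible pages
 *   echo idle > /sys/block/zram0/writeback   - write out pages marked via "idle"
 * Each written page takes one slot in the backing-device bitmap and is
 * re-read through read_from_bdev() on the next access.
 */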

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &bvec, entry, bio);
}

/*
 * The block layer wants one ->make_request_fn to be active at a time,
 * so if we use chained IO with the parent IO in the same context,
 * we deadlock. To avoid that, we do the IO from a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	gfp_t kmalloc_flags;

	kmalloc_flags = GFP_KERNEL;
	if (count > PAGE_SIZE)
		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;

	kbuf = kmalloc_node(count, kmalloc_flags, NUMA_NO_NODE);
	if (!kbuf && count > PAGE_SIZE)
		kbuf = vmalloc(count);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count < copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}
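
/*
 * Each line emitted above has the form
 *   "<index> <ac_time sec>.<usec> <s><w><h><i>"
 * where the trailing characters are either '.' or the letter for the
 * ZRAM_SAME, ZRAM_WB, ZRAM_HUGE and ZRAM_IDLE flags respectively.
 */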

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
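
/*
 * Usage sketch (assuming the usual sysfs layout): reading
 * /sys/block/zram0/comp_algorithm lists the compressors known to zcomp, and
 * e.g. "echo lz4 > /sys/block/zram0/comp_algorithm" (if lz4 is available)
 * selects one; this only succeeds while the device has no disksize set,
 * otherwise it returns -EBUSY.
 */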

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted,
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}
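
/*
 * The mm_stat columns above are, in order: original data size, compressed
 * data size, total memory used by the pool, mem_limit, the mem_used_max
 * watermark, the number of same-element pages, pages compacted by zsmalloc,
 * and the number of incompressible (huge) pages.
 */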

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
	zram->table[index].ac_time.tv64 = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
			~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}

static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	zram_slot_lock(zram, index);
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		struct bio_vec bvec;

		zram_slot_unlock(zram, index);

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;
		return read_from_bdev(zram, &bvec,
				zram_get_element(zram, index),
				bio, partial_io);
	}

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		zram_slot_unlock(zram, index);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
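
/*
 * Read-path summary (from __zram_bvec_read() above): a ZRAM_WB slot is
 * re-read from the backing device, an unallocated or ZRAM_SAME slot is
 * reconstructed with zram_fill_page(), and everything else is either copied
 * as-is (PAGE_SIZE objects) or decompressed from zsmalloc.
 */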

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;
	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}
1452
1453static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
Minchan Kim598d0532017-09-06 16:20:03 -07001454 u32 index, int offset, struct bio *bio)
Minchan Kimf61c5392017-05-03 14:55:41 -07001455{
1456 int ret;
1457 struct page *page = NULL;
1458 void *src;
1459 struct bio_vec vec;
1460
1461 vec = *bvec;
1462 if (is_partial_io(bvec)) {
1463 void *dst;
1464 /*
1465 * This is a partial IO. We need to read the full page
 1466		 * before writing the changes.
1467 */
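		/*
		 * Illustrative sketch (numbers are only an example): with
		 * PAGE_SIZE == 4096, a 512-byte write at byte offset 1024 of a
		 * page reads and decompresses the whole page into the bounce
		 * page allocated here, copies the 512 new bytes over bytes
		 * 1024..1535, and then recompresses and stores the full page.
		 */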
1468 page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
1469 if (!page)
1470 return -ENOMEM;
1471
Minchan Kim0a6c1992017-09-06 16:20:07 -07001472 ret = __zram_bvec_read(zram, page, index, bio, true);
Minchan Kimf61c5392017-05-03 14:55:41 -07001473 if (ret)
1474 goto out;
1475
1476 src = kmap_atomic(bvec->bv_page);
1477 dst = kmap_atomic(page);
1478 memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
1479 kunmap_atomic(dst);
1480 kunmap_atomic(src);
1481
1482 vec.bv_page = page;
1483 vec.bv_len = PAGE_SIZE;
1484 vec.bv_offset = 0;
1485 }
1486
Minchan Kim598d0532017-09-06 16:20:03 -07001487 ret = __zram_bvec_write(zram, &vec, index, bio);
Jerome Marchand924bd882011-06-10 15:28:48 +02001488out:
Nitin Gupta397c6062013-01-02 08:53:41 -08001489 if (is_partial_io(bvec))
Minchan Kimf61c5392017-05-03 14:55:41 -07001490 __free_page(page);
Jerome Marchand924bd882011-06-10 15:28:48 +02001491 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +02001492}
1493
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001494/*
 1495 * zram_bio_discard - handler for discard requests
1496 * @index: physical block index in PAGE_SIZE units
1497 * @offset: byte offset within physical block
1498 */
1499static void zram_bio_discard(struct zram *zram, u32 index,
1500 int offset, struct bio *bio)
1501{
1502 size_t n = bio->bi_iter.bi_size;
1503
1504 /*
1505 * zram manages data in physical block size units. Because logical block
 1506	 * size isn't identical to the physical block size on some architectures, we
1507 * could get a discard request pointing to a specific offset within a
1508 * certain physical block. Although we can handle this request by
 1509	 * reading that physical block and decompressing and partially zeroing
1510 * and re-compressing and then re-storing it, this isn't reasonable
1511 * because our intent with a discard request is to save memory. So
1512 * skipping this logical block is appropriate here.
1513 */
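	/*
	 * Worked example (illustrative numbers only): with PAGE_SIZE == 4096,
	 * a discard of n = 8192 bytes starting at offset 512 within page N
	 * skips the remaining 3584 bytes of page N, frees page N + 1 in full,
	 * and leaves the first 512 bytes of page N + 2 untouched because the
	 * leftover is smaller than a page.
	 */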
1514 if (offset) {
Weijie Yang38515c72014-06-04 16:11:06 -07001515 if (n <= (PAGE_SIZE - offset))
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001516 return;
1517
Weijie Yang38515c72014-06-04 16:11:06 -07001518 n -= (PAGE_SIZE - offset);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001519 index++;
1520 }
1521
1522 while (n >= PAGE_SIZE) {
Minchan Kim425db412017-05-03 14:55:44 -07001523 zram_slot_lock(zram, index);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001524 zram_free_page(zram, index);
Minchan Kim425db412017-05-03 14:55:44 -07001525 zram_slot_unlock(zram, index);
Sergey Senozhatsky015254d2014-10-09 15:29:57 -07001526 atomic64_inc(&zram->stats.notify_free);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001527 index++;
1528 n -= PAGE_SIZE;
1529 }
1530}
1531
Minchan Kimb53858b2017-09-06 16:20:00 -07001532/*
 1533 * Returns a negative errno if something goes wrong. Otherwise returns 0 or 1.
 1534 * Returns 0 if the IO request was completed synchronously.
 1535 * Returns 1 if the IO request was successfully submitted.
1536 */
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001537static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
Minchan Kim598d0532017-09-06 16:20:03 -07001538 int offset, bool is_write, struct bio *bio)
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001539{
1540 unsigned long start_time = jiffies;
Jens Axboec11f0c02016-08-05 08:11:04 -06001541 int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001542 int ret;
1543
Jens Axboec11f0c02016-08-05 08:11:04 -06001544 generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001545 &zram->disk->part0);
1546
Jens Axboec11f0c02016-08-05 08:11:04 -06001547 if (!is_write) {
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001548 atomic64_inc(&zram->stats.num_reads);
Minchan Kim0a6c1992017-09-06 16:20:07 -07001549 ret = zram_bvec_read(zram, bvec, index, offset, bio);
Minchan Kimf61c5392017-05-03 14:55:41 -07001550 flush_dcache_page(bvec->bv_page);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001551 } else {
1552 atomic64_inc(&zram->stats.num_writes);
Minchan Kim598d0532017-09-06 16:20:03 -07001553 ret = zram_bvec_write(zram, bvec, index, offset, bio);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001554 }
1555
Jens Axboec11f0c02016-08-05 08:11:04 -06001556 generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001557
Minchan Kim754f94b2018-06-07 17:05:45 -07001558 zram_slot_lock(zram, index);
1559 zram_accessed(zram, index);
1560 zram_slot_unlock(zram, index);
1561
Minchan Kimb53858b2017-09-06 16:20:00 -07001562 if (unlikely(ret < 0)) {
Jens Axboec11f0c02016-08-05 08:11:04 -06001563 if (!is_write)
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001564 atomic64_inc(&zram->stats.failed_reads);
1565 else
1566 atomic64_inc(&zram->stats.failed_writes);
1567 }
1568
1569 return ret;
1570}
1571
1572static void __zram_make_request(struct zram *zram, struct bio *bio)
1573{
Mike Christieabf54542016-08-04 14:23:34 -06001574 int offset;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001575 u32 index;
1576 struct bio_vec bvec;
1577 struct bvec_iter iter;
1578
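	/*
	 * Map the starting sector to a PAGE_SIZE index plus a byte offset.
	 * Example (assuming 512-byte sectors and 4K pages, so
	 * SECTORS_PER_PAGE == 8): bi_sector == 13 gives index 1 and
	 * offset (13 & 7) << 9 == 2560 bytes.
	 */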
1579 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1580 offset = (bio->bi_iter.bi_sector &
1581 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
1582
Mike Christie95fe6c12016-06-05 14:31:48 -05001583 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001584 zram_bio_discard(zram, index, offset, bio);
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001585 bio_endio(bio);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001586 return;
1587 }
1588
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001589 bio_for_each_segment(bvec, bio, iter) {
Minchan Kime7df4ff2017-05-03 14:55:38 -07001590 struct bio_vec bv = bvec;
1591 unsigned int unwritten = bvec.bv_len;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001592
Minchan Kime7df4ff2017-05-03 14:55:38 -07001593 do {
1594 bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
1595 unwritten);
Mike Christieabf54542016-08-04 14:23:34 -06001596 if (zram_bvec_rw(zram, &bv, index, offset,
Minchan Kim598d0532017-09-06 16:20:03 -07001597 op_is_write(bio_op(bio)), bio) < 0)
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001598 goto out;
1599
Minchan Kime7df4ff2017-05-03 14:55:38 -07001600 bv.bv_offset += bv.bv_len;
1601 unwritten -= bv.bv_len;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001602
Minchan Kime7df4ff2017-05-03 14:55:38 -07001603 update_position(&index, &offset, &bv);
1604 } while (unwritten);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001605 }
1606
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001607 bio_endio(bio);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001608 return;
1609
1610out:
1611 bio_io_error(bio);
1612}
1613
1614/*
1615 * Handler function for all zram I/O requests.
1616 */
Jens Axboedece1632015-11-05 10:41:16 -07001617static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001618{
1619 struct zram *zram = queue->queuedata;
1620
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001621 if (!valid_io_request(zram, bio->bi_iter.bi_sector,
1622 bio->bi_iter.bi_size)) {
1623 atomic64_inc(&zram->stats.invalid_io);
Minchan Kima73779c2017-02-24 14:56:47 -08001624 goto error;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001625 }
1626
1627 __zram_make_request(zram, bio);
Jens Axboedece1632015-11-05 10:41:16 -07001628 return BLK_QC_T_NONE;
Minchan Kima73779c2017-02-24 14:56:47 -08001629
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001630error:
1631 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001632 return BLK_QC_T_NONE;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001633}
1634
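/*
 * Called by the swap layer when a swap slot backed by this device is freed,
 * so the compressed copy can be dropped immediately instead of waiting for
 * the slot to be overwritten. If the slot lock cannot be taken, the free is
 * skipped and only miss_free is counted.
 */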
1635static void zram_slot_free_notify(struct block_device *bdev,
1636 unsigned long index)
1637{
1638 struct zram *zram;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001639
1640 zram = bdev->bd_disk->private_data;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001641
Minchan Kim97cebf92018-12-28 00:36:33 -08001642 atomic64_inc(&zram->stats.notify_free);
1643 if (!zram_slot_trylock(zram, index)) {
1644 atomic64_inc(&zram->stats.miss_free);
1645 return;
1646 }
1647
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001648 zram_free_page(zram, index);
Minchan Kim425db412017-05-03 14:55:44 -07001649 zram_slot_unlock(zram, index);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001650}
1651
1652static int zram_rw_page(struct block_device *bdev, sector_t sector,
Jens Axboec11f0c02016-08-05 08:11:04 -06001653 struct page *page, bool is_write)
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001654{
Minchan Kimb53858b2017-09-06 16:20:00 -07001655 int offset, ret;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001656 u32 index;
1657 struct zram *zram;
1658 struct bio_vec bv;
1659
1660 zram = bdev->bd_disk->private_data;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001661
1662 if (!valid_io_request(zram, sector, PAGE_SIZE)) {
1663 atomic64_inc(&zram->stats.invalid_io);
Minchan Kimb53858b2017-09-06 16:20:00 -07001664 ret = -EINVAL;
Minchan Kima73779c2017-02-24 14:56:47 -08001665 goto out;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001666 }
1667
1668 index = sector >> SECTORS_PER_PAGE_SHIFT;
Minchan Kim7d53d472017-04-13 14:56:35 -07001669 offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001670
1671 bv.bv_page = page;
1672 bv.bv_len = PAGE_SIZE;
1673 bv.bv_offset = 0;
1674
Minchan Kim598d0532017-09-06 16:20:03 -07001675 ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001676out:
1677 /*
 1678	 * If the I/O fails, just return an error (i.e., non-zero) without
 1679	 * calling page_endio.
 1680	 * This makes the upper callers of rw_page (e.g., swap_readpage,
 1681	 * __swap_writepage) resubmit the I/O as a bio request, and
 1682	 * bio->bi_end_io then does the error handling
 1683	 * (e.g., SetPageError, set_page_dirty and extra work).
1684 */
Minchan Kimb53858b2017-09-06 16:20:00 -07001685 if (unlikely(ret < 0))
1686 return ret;
1687
1688 switch (ret) {
1689 case 0:
Jens Axboec11f0c02016-08-05 08:11:04 -06001690 page_endio(page, is_write, 0);
Minchan Kimb53858b2017-09-06 16:20:00 -07001691 break;
1692 case 1:
1693 ret = 0;
1694 break;
1695 default:
1696 WARN_ON(1);
1697 }
1698 return ret;
Sergey Senozhatsky522698d2015-06-25 15:00:08 -07001699}
1700
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -08001701static void zram_reset_device(struct zram *zram)
Jerome Marchand924bd882011-06-10 15:28:48 +02001702{
Minchan Kim08eee692015-02-12 15:00:45 -08001703 struct zcomp *comp;
1704 u64 disksize;
1705
Sergey Senozhatsky644d4782013-06-26 15:28:39 +03001706 down_write(&zram->init_lock);
Minchan Kim9ada9da2014-10-09 15:29:53 -07001707
1708 zram->limit_pages = 0;
1709
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -07001710 if (!init_done(zram)) {
Sergey Senozhatsky644d4782013-06-26 15:28:39 +03001711 up_write(&zram->init_lock);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001712 return;
Sergey Senozhatsky644d4782013-06-26 15:28:39 +03001713 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001714
Minchan Kim08eee692015-02-12 15:00:45 -08001715 comp = zram->comp;
1716 disksize = zram->disksize;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001717 zram->disksize = 0;
Weijie Yangd7ad41a2015-06-10 11:14:49 -07001718
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001719 set_capacity(zram->disk, 0);
Weijie Yangd7ad41a2015-06-10 11:14:49 -07001720 part_stat_set_all(&zram->disk->part0, 0);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001721
Sergey Senozhatsky644d4782013-06-26 15:28:39 +03001722 up_write(&zram->init_lock);
Minchan Kim08eee692015-02-12 15:00:45 -08001723	/* I/O operations on all CPUs are done, so it is safe to free */
Minchan Kim6cb89542017-05-03 14:55:47 -07001724 zram_meta_free(zram, disksize);
Minchan Kimffa3b812017-05-03 14:55:53 -07001725 memset(&zram->stats, 0, sizeof(zram->stats));
Minchan Kim08eee692015-02-12 15:00:45 -08001726 zcomp_destroy(comp);
Minchan Kim9ac886a2017-09-06 16:19:54 -07001727 reset_bdev(zram);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001728}
1729
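/*
 * Sets the device size; it must be set before the device is initialized.
 * A usage sketch (the size is only an example); memparse() accepts plain
 * bytes or K/M/G suffixes:
 *
 *	echo 1G > /sys/block/zram0/disksize
 */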
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001730static ssize_t disksize_store(struct device *dev,
1731 struct device_attribute *attr, const char *buf, size_t len)
1732{
1733 u64 disksize;
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -07001734 struct zcomp *comp;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001735 struct zram *zram = dev_to_zram(dev);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -07001736 int err;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001737
1738 disksize = memparse(buf, NULL);
1739 if (!disksize)
1740 return -EINVAL;
1741
Minchan Kim6cb89542017-05-03 14:55:47 -07001742 down_write(&zram->init_lock);
1743 if (init_done(zram)) {
1744 pr_info("Cannot change disksize for initialized device\n");
1745 err = -EBUSY;
1746 goto out_unlock;
1747 }
1748
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001749 disksize = PAGE_ALIGN(disksize);
Minchan Kim6cb89542017-05-03 14:55:47 -07001750 if (!zram_meta_alloc(zram, disksize)) {
1751 err = -ENOMEM;
1752 goto out_unlock;
1753 }
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001754
Sergey Senozhatskyda9556a2016-05-20 16:59:51 -07001755 comp = zcomp_create(zram->compressor);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -07001756 if (IS_ERR(comp)) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001757 pr_err("Cannot initialise %s compressing backend\n",
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001758 zram->compressor);
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -07001759 err = PTR_ERR(comp);
1760 goto out_free_meta;
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -07001761 }
1762
Sergey Senozhatskyd61f98c2014-04-07 15:38:19 -07001763 zram->comp = comp;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001764 zram->disksize = disksize;
1765 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
Minchan Kim34a17b12017-11-15 17:32:56 -08001766
1767 revalidate_disk(zram->disk);
Minchan Kimad4764b2017-01-10 16:58:18 -08001768 up_write(&zram->init_lock);
Minchan Kimb4c5c602014-07-23 14:00:04 -07001769
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001770 return len;
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -07001771
Sergey Senozhatskyfcfa8d92014-04-07 15:38:20 -07001772out_free_meta:
Minchan Kim6cb89542017-05-03 14:55:47 -07001773 zram_meta_free(zram, disksize);
1774out_unlock:
1775 up_write(&zram->init_lock);
Sergey Senozhatskyb7ca2322014-04-07 15:38:12 -07001776 return err;
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001777}
1778
1779static ssize_t reset_store(struct device *dev,
1780 struct device_attribute *attr, const char *buf, size_t len)
1781{
1782 int ret;
1783 unsigned short do_reset;
1784 struct zram *zram;
1785 struct block_device *bdev;
1786
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001787 ret = kstrtou16(buf, 10, &do_reset);
1788 if (ret)
1789 return ret;
1790
1791 if (!do_reset)
1792 return -EINVAL;
1793
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001794 zram = dev_to_zram(dev);
1795 bdev = bdget_disk(zram->disk, 0);
Rashika Kheria46a51c82013-10-30 18:36:32 +05301796 if (!bdev)
1797 return -ENOMEM;
1798
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -08001799 mutex_lock(&bdev->bd_mutex);
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001800 /* Do not reset an active device or claimed device */
1801 if (bdev->bd_openers || zram->claim) {
1802 mutex_unlock(&bdev->bd_mutex);
1803 bdput(bdev);
1804 return -EBUSY;
Rashika Kheria1b672222013-11-10 22:13:53 +05301805 }
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001806
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001807	/* From now on, no one can open /dev/zram[0-9] */
1808 zram->claim = true;
1809 mutex_unlock(&bdev->bd_mutex);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001810
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001811	/* Make sure all pending I/O is finished */
Rashika Kheria46a51c82013-10-30 18:36:32 +05301812 fsync_bdev(bdev);
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -08001813 zram_reset_device(zram);
Minchan Kim34a17b12017-11-15 17:32:56 -08001814 revalidate_disk(zram->disk);
Rashika Kheria1b672222013-11-10 22:13:53 +05301815 bdput(bdev);
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001816
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001817 mutex_lock(&bdev->bd_mutex);
1818 zram->claim = false;
Sergey Senozhatskyba6b17d2015-02-12 15:00:36 -08001819 mutex_unlock(&bdev->bd_mutex);
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001820
1821 return len;
1822}
1823
1824static int zram_open(struct block_device *bdev, fmode_t mode)
1825{
1826 int ret = 0;
1827 struct zram *zram;
1828
1829 WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
1830
1831 zram = bdev->bd_disk->private_data;
 1832	/* zram was claimed for reset, so the open request fails */
1833 if (zram->claim)
1834 ret = -EBUSY;
1835
Rashika Kheria1b672222013-11-10 22:13:53 +05301836 return ret;
Jerome Marchand8c921b22011-06-10 15:28:47 +02001837}
1838
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301839static const struct block_device_operations zram_devops = {
Sergey Senozhatskyf405c442015-06-25 15:00:21 -07001840 .open = zram_open,
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301841 .swap_slot_free_notify = zram_slot_free_notify,
karam.lee8c7f0102014-12-12 16:56:53 -08001842 .rw_page = zram_rw_page,
Nitin Gupta107c1612010-05-17 11:02:44 +05301843 .owner = THIS_MODULE
Nitin Gupta306b0c92009-09-22 10:26:53 +05301844};
1845
Andrew Morton99ebbd302015-05-05 16:23:25 -07001846static DEVICE_ATTR_WO(compact);
Ganesh Mahendran083914e2014-12-12 16:57:13 -08001847static DEVICE_ATTR_RW(disksize);
1848static DEVICE_ATTR_RO(initstate);
1849static DEVICE_ATTR_WO(reset);
Sergey Senozhatskyf29eb692017-02-22 15:46:45 -08001850static DEVICE_ATTR_WO(mem_limit);
1851static DEVICE_ATTR_WO(mem_used_max);
Minchan Kim149be472018-12-28 00:36:44 -08001852static DEVICE_ATTR_WO(idle);
Ganesh Mahendran083914e2014-12-12 16:57:13 -08001853static DEVICE_ATTR_RW(max_comp_streams);
1854static DEVICE_ATTR_RW(comp_algorithm);
Minchan Kim9ac886a2017-09-06 16:19:54 -07001855#ifdef CONFIG_ZRAM_WRITEBACK
1856static DEVICE_ATTR_RW(backing_dev);
Minchan Kim86d820b2018-12-28 00:36:47 -08001857static DEVICE_ATTR_WO(writeback);
Minchan Kim2cf97fa2018-12-28 00:36:54 -08001858static DEVICE_ATTR_RW(writeback_limit);
Minchan Kimf26c1b22019-01-08 15:22:53 -08001859static DEVICE_ATTR_RW(writeback_limit_enable);
Minchan Kim9ac886a2017-09-06 16:19:54 -07001860#endif
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001861
1862static struct attribute *zram_disk_attrs[] = {
1863 &dev_attr_disksize.attr,
1864 &dev_attr_initstate.attr,
1865 &dev_attr_reset.attr,
Andrew Morton99ebbd302015-05-05 16:23:25 -07001866 &dev_attr_compact.attr,
Minchan Kim9ada9da2014-10-09 15:29:53 -07001867 &dev_attr_mem_limit.attr,
Minchan Kim461a8ee2014-10-09 15:29:55 -07001868 &dev_attr_mem_used_max.attr,
Minchan Kim149be472018-12-28 00:36:44 -08001869 &dev_attr_idle.attr,
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -07001870 &dev_attr_max_comp_streams.attr,
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001871 &dev_attr_comp_algorithm.attr,
Minchan Kim9ac886a2017-09-06 16:19:54 -07001872#ifdef CONFIG_ZRAM_WRITEBACK
1873 &dev_attr_backing_dev.attr,
Minchan Kim86d820b2018-12-28 00:36:47 -08001874 &dev_attr_writeback.attr,
Minchan Kim2cf97fa2018-12-28 00:36:54 -08001875 &dev_attr_writeback_limit.attr,
Minchan Kimf26c1b22019-01-08 15:22:53 -08001876 &dev_attr_writeback_limit_enable.attr,
Minchan Kim9ac886a2017-09-06 16:19:54 -07001877#endif
Sergey Senozhatsky2f6a3be2015-04-15 16:16:03 -07001878 &dev_attr_io_stat.attr,
Sergey Senozhatsky4f2109f2015-04-15 16:16:06 -07001879 &dev_attr_mm_stat.attr,
Minchan Kime1dd5d12018-12-28 00:36:51 -08001880#ifdef CONFIG_ZRAM_WRITEBACK
1881 &dev_attr_bd_stat.attr,
1882#endif
Sergey Senozhatsky623e47f2016-05-20 17:00:02 -07001883 &dev_attr_debug_stat.attr,
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001884 NULL,
1885};
1886
Arvind Yadav15a54fc2017-07-10 15:50:15 -07001887static const struct attribute_group zram_disk_attr_group = {
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001888 .attrs = zram_disk_attrs,
1889};
1890
Minchan Kim553a5612018-11-23 15:28:02 +09001891static const struct attribute_group *zram_disk_attr_groups[] = {
1892 &zram_disk_attr_group,
1893 NULL,
1894};
1895
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001896/*
 1897 * Allocate and initialize a new zram device. The function returns
 1898 * a '>= 0' device_id upon success, and a negative value otherwise.
1899 */
1900static int zram_add(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301901{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001902 struct zram *zram;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001903 struct request_queue *queue;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001904 int ret, device_id;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001905
1906 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
1907 if (!zram)
1908 return -ENOMEM;
1909
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001910 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001911 if (ret < 0)
1912 goto out_free_dev;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001913 device_id = ret;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301914
Jerome Marchand0900bea2011-09-06 15:02:11 +02001915 init_rwsem(&zram->init_lock);
Minchan Kimf26c1b22019-01-08 15:22:53 -08001916#ifdef CONFIG_ZRAM_WRITEBACK
1917 spin_lock_init(&zram->wb_limit_lock);
1918#endif
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001919 queue = blk_alloc_queue(GFP_KERNEL);
1920 if (!queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +05301921 pr_err("Error allocating disk queue for device %d\n",
1922 device_id);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001923 ret = -ENOMEM;
1924 goto out_free_idr;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301925 }
1926
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001927 blk_queue_make_request(queue, zram_make_request);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301928
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001929 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301930 zram->disk = alloc_disk(1);
1931 if (!zram->disk) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001932 pr_err("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +05301933 device_id);
Julia Lawall201c7b72015-04-15 16:16:27 -07001934 ret = -ENOMEM;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001935 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301936 }
1937
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301938 zram->disk->major = zram_major;
1939 zram->disk->first_minor = device_id;
1940 zram->disk->fops = &zram_devops;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001941 zram->disk->queue = queue;
1942 zram->disk->queue->queuedata = zram;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301943 zram->disk->private_data = zram;
1944 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301945
Nitin Gupta33863c22010-08-09 22:56:47 +05301946	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301947 set_capacity(zram->disk, 0);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001948	/* zram devices sort of resemble non-rotational disks */
1949 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
Mike Snitzerb277da02014-10-04 10:55:32 -06001950 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
Minchan Kim34a17b12017-11-15 17:32:56 -08001951
Nitin Guptaa1dd52a2010-06-01 13:31:23 +05301952 /*
1953 * To ensure that we always get PAGE_SIZE aligned
 1954	 * and n*PAGE_SIZE sized I/O requests.
1955 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301956 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -06001957 blk_queue_logical_block_size(zram->disk->queue,
1958 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301959 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1960 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001961 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06001962 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001963 /*
1964 * zram_bio_discard() will clear all logical blocks if logical block
 1965	 * size is identical to the physical block size (PAGE_SIZE). But if it is
1966 * different, we will skip discarding some parts of logical blocks in
1967 * the part of the request range which isn't aligned to physical block
1968 * size. So we can't ensure that all discarded logical blocks are
1969 * zeroed.
1970 */
1971 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1972 zram->disk->queue->limits.discard_zeroes_data = 1;
1973 else
1974 zram->disk->queue->limits.discard_zeroes_data = 0;
1975 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +05301976
Minchan Kim34a17b12017-11-15 17:32:56 -08001977 zram->disk->queue->backing_dev_info.capabilities |=
1978 BDI_CAP_STABLE_WRITES;
Greg Kroah-Hartman8fe42842018-11-27 18:57:51 +01001979
Minchan Kim553a5612018-11-23 15:28:02 +09001980 disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301981 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301982
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001983 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
Sergey Senozhatskyd12b63c2015-06-25 15:00:14 -07001984
Minchan Kimf1dcb852018-06-07 17:05:49 -07001985 zram_debugfs_register(zram);
Sergey Senozhatskyd12b63c2015-06-25 15:00:14 -07001986 pr_info("Added device: %s\n", zram->disk->disk_name);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001987 return device_id;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301988
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001989out_free_queue:
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001990 blk_cleanup_queue(queue);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001991out_free_idr:
1992 idr_remove(&zram_index_idr, device_id);
1993out_free_dev:
1994 kfree(zram);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301995 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301996}
1997
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001998static int zram_remove(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301999{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002000 struct block_device *bdev;
2001
2002 bdev = bdget_disk(zram->disk, 0);
2003 if (!bdev)
2004 return -ENOMEM;
2005
2006 mutex_lock(&bdev->bd_mutex);
2007 if (bdev->bd_openers || zram->claim) {
2008 mutex_unlock(&bdev->bd_mutex);
2009 bdput(bdev);
2010 return -EBUSY;
2011 }
2012
2013 zram->claim = true;
2014 mutex_unlock(&bdev->bd_mutex);
2015
Minchan Kimf1dcb852018-06-07 17:05:49 -07002016 zram_debugfs_unregister(zram);
Nitin Gupta33863c22010-08-09 22:56:47 +05302017
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002018	/* Make sure all pending I/O is finished */
2019 fsync_bdev(bdev);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002020 zram_reset_device(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002021 bdput(bdev);
2022
2023 pr_info("Removed device: %s\n", zram->disk->disk_name);
2024
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002025 del_gendisk(zram->disk);
Bart Van Assche8aaf4402018-02-28 10:15:30 -08002026 blk_cleanup_queue(zram->disk->queue);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002027 put_disk(zram->disk);
2028 kfree(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002029 return 0;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002030}
Nitin Gupta306b0c92009-09-22 10:26:53 +05302031
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002032/* zram-control sysfs attributes */
2033static ssize_t hot_add_show(struct class *class,
2034 struct class_attribute *attr,
2035 char *buf)
2036{
2037 int ret;
2038
2039 mutex_lock(&zram_index_mutex);
2040 ret = zram_add();
2041 mutex_unlock(&zram_index_mutex);
2042
2043 if (ret < 0)
2044 return ret;
2045 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
2046}
2047
2048static ssize_t hot_remove_store(struct class *class,
2049 struct class_attribute *attr,
2050 const char *buf,
2051 size_t count)
2052{
2053 struct zram *zram;
2054 int ret, dev_id;
2055
2056 /* dev_id is gendisk->first_minor, which is `int' */
2057 ret = kstrtoint(buf, 10, &dev_id);
2058 if (ret)
2059 return ret;
2060 if (dev_id < 0)
2061 return -EINVAL;
2062
2063 mutex_lock(&zram_index_mutex);
2064
2065 zram = idr_find(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08002066 if (zram) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002067 ret = zram_remove(zram);
Takashi Iwai529e71e2016-11-30 15:54:08 -08002068 if (!ret)
2069 idr_remove(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08002070 } else {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002071 ret = -ENODEV;
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08002072 }
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002073
2074 mutex_unlock(&zram_index_mutex);
2075 return ret ? ret : count;
2076}
2077
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08002078/*
 2079 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
 2080 * sense that reading from this file does alter the state of your system -- it
 2081 * creates a new un-initialized zram device and returns this device's
 2082 * device_id (or an error code if it fails to create a new device).
2083 */
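/*
 * Usage sketch (the device id is only an example):
 *
 *	cat /sys/class/zram-control/hot_add	(prints the new device id)
 *	echo 4 > /sys/class/zram-control/hot_remove
 */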
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002084static struct class_attribute zram_control_class_attrs[] = {
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08002085 __ATTR(hot_add, 0400, hot_add_show, NULL),
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002086 __ATTR_WO(hot_remove),
2087 __ATTR_NULL,
2088};
2089
2090static struct class zram_control_class = {
2091 .name = "zram-control",
2092 .owner = THIS_MODULE,
2093 .class_attrs = zram_control_class_attrs,
2094};
2095
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002096static int zram_remove_cb(int id, void *ptr, void *data)
2097{
2098 zram_remove(ptr);
2099 return 0;
2100}
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08002101
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002102static void destroy_devices(void)
2103{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002104 class_unregister(&zram_control_class);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002105 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
Minchan Kimf1dcb852018-06-07 17:05:49 -07002106 zram_debugfs_destroy();
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002107 idr_destroy(&zram_index_idr);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08002108 unregister_blkdev(zram_major, "zram");
Nitin Gupta306b0c92009-09-22 10:26:53 +05302109}
2110
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302111static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05302112{
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07002113 int ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05302114
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002115 ret = class_register(&zram_control_class);
2116 if (ret) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07002117 pr_err("Unable to register zram-control class\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002118 return ret;
2119 }
2120
Minchan Kimf1dcb852018-06-07 17:05:49 -07002121 zram_debugfs_create();
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302122 zram_major = register_blkdev(0, "zram");
2123 if (zram_major <= 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07002124 pr_err("Unable to get major number\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002125 class_unregister(&zram_control_class);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08002126 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05302127 }
2128
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07002129 while (num_devices != 0) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002130 mutex_lock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07002131 ret = zram_add();
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07002132 mutex_unlock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07002133 if (ret < 0)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08002134 goto out_error;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07002135 num_devices--;
Nitin Guptade1a21a2010-01-28 21:13:40 +05302136 }
2137
Nitin Gupta306b0c92009-09-22 10:26:53 +05302138 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05302139
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08002140out_error:
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002141 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05302142 return ret;
2143}
2144
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302145static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05302146{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07002147 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05302148}
2149
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302150module_init(zram_init);
2151module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05302152
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03002153module_param(num_devices, uint, 0);
Sergey Senozhatskyc3cdb402015-06-25 15:00:11 -07002154MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
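/*
 * Example (illustrative): "modprobe zram num_devices=4" pre-creates
 * /dev/zram0 through /dev/zram3; additional devices can be added later
 * via /sys/class/zram-control/hot_add.
 */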
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03002155
Nitin Gupta306b0c92009-09-22 10:26:53 +05302156MODULE_LICENSE("Dual BSD/GPL");
2157MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05302158MODULE_DESCRIPTION("Compressed RAM Block Device");