/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

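/*
 * Each table entry packs two things into its ->value word: the
 * compressed object size lives in the low ZRAM_FLAG_SHIFT bits, and
 * the ZRAM_* page flags occupy the bits above it. The two accessors
 * below mask and shift accordingly.
 */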
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

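/*
 * Note: with the usual 4K logical block size, valid_io_request() has
 * already enforced block alignment on 4K-page systems, so a bvec can
 * never cover only part of a page there; the constant-false variant
 * below lets the compiler drop all partial-I/O handling.
 */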
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() clears BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

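/*
 * Racy-but-safe high-watermark update: the cmpxchg loop retries until
 * either the recorded maximum is already >= @pages or the new maximum
 * was installed atomically (atomic_long_cmpxchg() returns the old
 * value, which matches cur_max only on success).
 */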
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

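/*
 * A page is "same filled" when every unsigned-long-sized word in it
 * holds the same value (most commonly zero). Such pages are not sent
 * to the compressor at all; only the repeated word is recorded in the
 * table entry, see zram_same_page_write().
 */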
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

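/*
 * Writing "0" (the only accepted value) resets the recorded
 * high-watermark to the pool's current size; the watermark itself is
 * reported through the mm_stat attribute.
 */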
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

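/*
 * comp_algorithm selects the compression backend for a device that has
 * not been initialized yet, e.g. (assuming a device named zram0):
 *
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *
 * Reading the file lists the recognized algorithms.
 */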
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

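/*
 * mm_stat columns, in order: orig_data_size, compr_data_size,
 * mem_used_total, mem_limit, mem_used_max, same_pages and
 * pages_compacted (all byte counts except the last two, which are
 * page counts).
 */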
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

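/*
 * Per-slot serialization: rather than a separate lock per table entry,
 * the ZRAM_ACCESS bit inside the entry's ->value word is used as a bit
 * spinlock, which keeps the table entries small.
 */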
static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}

static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	zram_slot_lock(zram, index);
	if (unlikely(!zram_get_handle(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME))) {
		void *mem;

		zram_slot_unlock(zram, index);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len,
				zram_get_element(zram, index));
		kunmap_atomic(mem);
		return true;
	}
	zram_slot_unlock(zram, index);

	return false;
}

static bool zram_same_page_write(struct zram *zram, u32 index,
				struct page *page)
{
	unsigned long element;
	void *mem = kmap_atomic(page);

	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_set_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, element);
		zram_slot_unlock(zram, index);

		atomic64_inc(&zram->stats.same_pages);
		atomic64_inc(&zram->stats.pages_stored);
		return true;
	}
	kunmap_atomic(mem);

	return false;
}

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, which marks the
 * entry as being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram_get_handle(zram, index);

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear the same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, 0);
		atomic64_dec(&zram->stats.same_pages);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
}

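/*
 * Note: objects whose compressed size would exceed max_zpage_size are
 * stored uncompressed at exactly PAGE_SIZE (see zram_compress()), which
 * is why a stored size of PAGE_SIZE below is satisfied with a plain
 * memcpy() instead of a decompression call.
 */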
static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
		return 0;

	zram_slot_lock(zram, index);
	handle = zram_get_handle(zram, index);
	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = zram_decompress_page(zram, page, index);
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

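/*
 * Compress @page and allocate a zsmalloc handle big enough to hold the
 * result. On success the handle and the compressed length are returned
 * through @out_handle/@out_comp_len; the caller is still responsible
 * for mapping the object and copying the data in (see
 * __zram_bvec_write()).
 */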
static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
			struct page *page,
			unsigned long *out_handle, unsigned int *out_comp_len)
{
	int ret;
	unsigned int comp_len;
	void *src;
	unsigned long alloced_pages;
	unsigned long handle = 0;

compress_again:
	src = kmap_atomic(page);
	ret = zcomp_compress(*zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		if (handle)
			zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len > max_zpage_size))
		comp_len = PAGE_SIZE;

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		*zstrm = zcomp_stream_get(zram->comp);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	*out_handle = handle;
	*out_comp_len = comp_len;
	return 0;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
{
	int ret;
	unsigned long handle;
	unsigned int comp_len;
	void *src, *dst;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;

	if (zram_same_page_write(zram, index, page))
		return 0;

	zstrm = zcomp_stream_get(zram->comp);
	ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
	if (ret) {
		zcomp_stream_put(zram->comp);
		return ret;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle);
	zram_set_obj_size(zram, index, comp_len);
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_add(comp_len, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
	return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = zram_decompress_page(zram, page, index);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index);
out:
	if (is_partial_io(bvec))
		__free_page(page);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

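	/*
	 * A bio_vec may cross a PAGE_SIZE boundary in the device's
	 * address space, so split it up: each zram_bvec_rw() call below
	 * is clamped to the remainder of the current page.
	 */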
	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

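/*
 * Called through block_device_operations->swap_slot_free_notify when
 * the swap layer frees a swap slot backed by this device, letting zram
 * drop the compressed copy immediately instead of waiting for an
 * overwrite.
 */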
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
			struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
	/*
	 * If the I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. The callers of rw_page (e.g. swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error (e.g. SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, is_write, 0);
	return err;
}

static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
}

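/*
 * disksize is the attribute that actually initializes the device:
 * writing a size allocates the metadata table and the compression
 * backend, e.g. (assuming a device named zram0):
 *
 *	echo 1G > /sys/block/zram0/disksize
 */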
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed for reset, so the open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

1113 &dev_attr_disksize.attr,
1114 &dev_attr_initstate.attr,
1115 &dev_attr_reset.attr,
Andrew Morton99ebbd32015-05-05 16:23:25 -07001116 &dev_attr_compact.attr,
Minchan Kim9ada9da2014-10-09 15:29:53 -07001117 &dev_attr_mem_limit.attr,
Minchan Kim461a8ee2014-10-09 15:29:55 -07001118 &dev_attr_mem_used_max.attr,
Sergey Senozhatskybeca3ec2014-04-07 15:38:14 -07001119 &dev_attr_max_comp_streams.attr,
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001120 &dev_attr_comp_algorithm.attr,
Sergey Senozhatsky2f6a3be2015-04-15 16:16:03 -07001121 &dev_attr_io_stat.attr,
Sergey Senozhatsky4f2109f2015-04-15 16:16:06 -07001122 &dev_attr_mm_stat.attr,
Sergey Senozhatsky623e47f2016-05-20 17:00:02 -07001123 &dev_attr_debug_stat.attr,
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001124 NULL,
1125};
1126
Arvind Yadavbc1bb362017-07-10 15:50:15 -07001127static const struct attribute_group zram_disk_attr_group = {
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001128 .attrs = zram_disk_attrs,
1129};
1130
/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
			device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001234static int zram_remove(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301235{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001236 struct block_device *bdev;
1237
1238 bdev = bdget_disk(zram->disk, 0);
1239 if (!bdev)
1240 return -ENOMEM;
1241
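	/*
	 * Claim protocol: under bd_mutex, refuse removal while the
	 * device is open or already claimed by a concurrent reset or
	 * remove; setting ->claim makes subsequent opens and resets
	 * fail with -EBUSY until the device is fully destroyed.
	 */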
1242 mutex_lock(&bdev->bd_mutex);
1243 if (bdev->bd_openers || zram->claim) {
1244 mutex_unlock(&bdev->bd_mutex);
1245 bdput(bdev);
1246 return -EBUSY;
1247 }
1248
1249 zram->claim = true;
1250 mutex_unlock(&bdev->bd_mutex);
1251
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001252 /*
1253 * Remove sysfs first, so no one can perform a disksize
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001254 * store while we destroy the device. This also helps during
1255 * hot_remove -- zram_reset_device() is the last holder of
1256 * ->init_lock, so no later/concurrent disksize_store() or any
1257 * other sysfs handler is possible.
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001258 */
1259 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1260 &zram_disk_attr_group);
Nitin Gupta33863c22010-08-09 22:56:47 +05301261
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001262 /* Make sure all pending I/O is finished */
1263 fsync_bdev(bdev);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001264 zram_reset_device(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001265 bdput(bdev);
1266
1267 pr_info("Removed device: %s\n", zram->disk->disk_name);
1268
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001269 blk_cleanup_queue(zram->disk->queue);
1270 del_gendisk(zram->disk);
1271 put_disk(zram->disk);
1272 kfree(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001273 return 0;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001274}
Nitin Gupta306b0c92009-09-22 10:26:53 +05301275
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001276/* zram-control sysfs attributes */
Greg Kroah-Hartman27104a52017-06-08 10:12:39 +02001277
1278/*
1279 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
1280 * the sense that reading from this file does alter the state of the system
1281 * -- it creates a new un-initialized zram device and returns that device's
1282 * device_id (or an error code if it fails to create a new device).
1283 */
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001284static ssize_t hot_add_show(struct class *class,
1285 struct class_attribute *attr,
1286 char *buf)
1287{
1288 int ret;
1289
1290 mutex_lock(&zram_index_mutex);
1291 ret = zram_add();
1292 mutex_unlock(&zram_index_mutex);
1293
1294 if (ret < 0)
1295 return ret;
1296 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
1297}
Greg Kroah-Hartmanf40609d2017-06-13 09:12:46 +02001298static CLASS_ATTR_RO(hot_add);
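/*
 * Example (userspace): reading the attribute allocates a device and
 * prints its id, e.g.
 *	cat /sys/class/zram-control/hot_add
 * might print "1", meaning /dev/zram1 has just been created.
 */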
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001299
1300static ssize_t hot_remove_store(struct class *class,
1301 struct class_attribute *attr,
1302 const char *buf,
1303 size_t count)
1304{
1305 struct zram *zram;
1306 int ret, dev_id;
1307
1308 /* dev_id is gendisk->first_minor, which is `int' */
1309 ret = kstrtoint(buf, 10, &dev_id);
1310 if (ret)
1311 return ret;
1312 if (dev_id < 0)
1313 return -EINVAL;
1314
1315 mutex_lock(&zram_index_mutex);
1316
1317 zram = idr_find(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001318 if (zram) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001319 ret = zram_remove(zram);
Takashi Iwai529e71e2016-11-30 15:54:08 -08001320 if (!ret)
1321 idr_remove(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001322 } else {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001323 ret = -ENODEV;
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001324 }
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001325
1326 mutex_unlock(&zram_index_mutex);
1327 return ret ? ret : count;
1328}
Greg Kroah-Hartman27104a52017-06-08 10:12:39 +02001329static CLASS_ATTR_WO(hot_remove);
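/*
 * Example (userspace):
 *	echo 4 > /sys/class/zram-control/hot_remove
 * destroys /dev/zram4; the write fails with -EBUSY if the device is
 * still open or claimed, or with -ENODEV if no such device exists.
 */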
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001330
Greg Kroah-Hartman27104a52017-06-08 10:12:39 +02001331static struct attribute *zram_control_class_attrs[] = {
1332 &class_attr_hot_add.attr,
1333 &class_attr_hot_remove.attr,
1334 NULL,
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001335};
Greg Kroah-Hartman27104a52017-06-08 10:12:39 +02001336ATTRIBUTE_GROUPS(zram_control_class);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001337
1338static struct class zram_control_class = {
1339 .name = "zram-control",
1340 .owner = THIS_MODULE,
Greg Kroah-Hartman27104a52017-06-08 10:12:39 +02001341 .class_groups = zram_control_class_groups,
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001342};
1343
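/*
 * idr_for_each() callback used by destroy_devices() on module unload;
 * the return value of zram_remove() is ignored here so that iteration
 * continues over any remaining devices.
 */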
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001344static int zram_remove_cb(int id, void *ptr, void *data)
1345{
1346 zram_remove(ptr);
1347 return 0;
1348}
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001349
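/*
 * Tear everything down in roughly the reverse order of zram_init():
 * unregister the zram-control class first so no new hot_add/hot_remove
 * can race with device destruction, then remove all devices, release
 * the major number, and finally remove the CPU hotplug state.
 */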
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001350static void destroy_devices(void)
1351{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001352 class_unregister(&zram_control_class);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001353 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
1354 idr_destroy(&zram_index_idr);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001355 unregister_blkdev(zram_major, "zram");
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001356 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301357}
1358
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301359static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301360{
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001361 int ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301362
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001363 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
1364 zcomp_cpu_up_prepare, zcomp_cpu_dead);
1365 if (ret < 0)
1366 return ret;
1367
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001368 ret = class_register(&zram_control_class);
1369 if (ret) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001370 pr_err("Unable to register zram-control class\n");
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001371 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001372 return ret;
1373 }
1374
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301375 zram_major = register_blkdev(0, "zram");
1376 if (zram_major <= 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001377 pr_err("Unable to get major number\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001378 class_unregister(&zram_control_class);
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001379 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001380 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301381 }
1382
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001383 while (num_devices != 0) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001384 mutex_lock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001385 ret = zram_add();
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001386 mutex_unlock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001387 if (ret < 0)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001388 goto out_error;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001389 num_devices--;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301390 }
1391
Nitin Gupta306b0c92009-09-22 10:26:53 +05301392 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301393
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001394out_error:
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001395 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301396 return ret;
1397}
1398
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301399static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301400{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001401 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301402}
1403
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301404module_init(zram_init);
1405module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301406
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001407module_param(num_devices, uint, 0);
Sergey Senozhatskyc3cdb402015-06-25 15:00:11 -07001408MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
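/*
 * Example: "modprobe zram num_devices=4" pre-creates /dev/zram0 through
 * /dev/zram3; more devices can be added at runtime via
 * /sys/class/zram-control/hot_add.
 */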
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001409
Nitin Gupta306b0c92009-09-22 10:26:53 +05301410MODULE_LICENSE("Dual BSD/GPL");
1411MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301412MODULE_DESCRIPTION("Compressed RAM Block Device");