/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

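/* A zram device counts as initialized once a non-zero disksize is set. */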
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram_meta *meta, u32 index,
			unsigned long element)
{
	meta->table[index].element = element;
}

static inline void zram_clear_element(struct zram_meta *meta, u32 index)
{
	meta->table[index].element = 0;
}

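/*
 * Each table entry's 'value' field packs the stored object size into the
 * low ZRAM_FLAG_SHIFT bits and the zram_pageflags into the bits above.
 */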
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() clears BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

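/* Advance the (index, offset) cursor past the bytes covered by @bvec. */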
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

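/*
 * Lock-free maximum update: retry atomic_long_cmpxchg() until either we
 * published the new maximum or another writer raced in with a value at
 * least as large.
 */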
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

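/*
 * Fill @len bytes at @ptr with the repeating word @value; the all-zero
 * case is the common one and takes the memset() fast path.
 */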
static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

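/*
 * A page is "same filled" when it is one unsigned long repeated across
 * the whole page; such pages are stored as that single value instead of
 * being compressed.
 */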
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
		if (page[pos] != page[pos + 1])
			return false;
	}

	*element = page[pos];

	return true;
}

static void handle_same_page(struct bio_vec *bvec, unsigned long element)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	zs_compact(meta->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->meta->mem_pool);
		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;
		/*
		 * No memory is allocated for same element filled pages.
		 * Simply clear same page flag.
		 */
		if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(pool_name);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(meta, index, ZRAM_SAME)) {
		zram_clear_flag(meta, index, ZRAM_SAME);
		zram_clear_element(meta, index);
		atomic64_dec(&zram->stats.same_pages);
		return;
	}

	if (!handle)
		return;

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

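/*
 * Decompress the object at @index into the PAGE_SIZE buffer @mem.
 * Same-filled pages are reconstructed with zram_fill_page(), and objects
 * stored uncompressed (size == PAGE_SIZE) are simply copied.
 */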
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	unsigned int size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		copy_page(mem, cmem);
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		ret = zcomp_decompress(zstrm, cmem, size, mem);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

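/*
 * Read handler for a single bio_vec. For a partial (sub-page) read, the
 * whole page is decompressed into a bounce buffer and only the requested
 * range is copied to the caller's page.
 */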
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_SAME)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_same_page(bvec, meta->table[index].element);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_err("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

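/*
 * Write handler for a single bio_vec. A partial write becomes a
 * read-modify-write of the full page; same-filled pages are recorded
 * without allocating backing memory; everything else is compressed and
 * stored in the zsmalloc pool.
 */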
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	unsigned int clen;
	unsigned long handle = 0;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;
	unsigned long element;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

compress_again:
	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_same_filled(uncmem, &element)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_SAME);
		zram_set_element(meta, index, element);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.same_pages);
		ret = 0;
		goto out;
	}

	zstrm = zcomp_stream_get(zram->comp);
	ret = zcomp_compress(zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(meta->mem_pool, clen,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		zstrm = NULL;

		atomic64_inc(&zram->stats.writestall);

		handle = zs_malloc(meta->mem_pool, clen,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;

		pr_err("Error allocating memory for compressed page: %u, size=%u\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_stream_put(zram->comp);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_stream_put(zram->comp);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a
	 * specific offset within a certain physical block. Although we can
	 * handle this request by reading that physical block and
	 * decompressing and partially zeroing and re-compressing and then
	 * re-storing it, this isn't reasonable because our intent with a
	 * discard request is to save memory. So skipping this logical block
	 * is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

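/*
 * Dispatch one bio_vec to the read or write handler and account the
 * operation in the generic block layer I/O statistics.
 */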
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset,
					 op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0,
					 op_is_write(bio_op(bio))) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset,
					 op_is_write(bio_op(bio))) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	blk_queue_split(queue, &bio, queue->bio_split);

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

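/*
 * Called via the block_device_operations hook when swap frees a slot,
 * so the backing object can be released right away.
 */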
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

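/*
 * ->rw_page() entry point: synchronous single-page I/O that lets callers
 * such as the swap code avoid bio allocation.
 */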
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
out:
	/*
	 * If I/O fails, just return the error (i.e., non-zero) without
	 * calling page_endio.
	 * This causes the upper layers of rw_page (e.g., swap_readpage,
	 * __swap_writepage) to resubmit the I/O as a bio request, whose
	 * bio->bi_end_io handles the error (e.g., SetPageError,
	 * set_page_dirty and extra works).
	 */
	if (err == 0)
		page_endio(page, is_write, 0);
	return err;
}

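/*
 * Tear down an initialized device: reset the stats and capacity under
 * init_lock, then free the metadata and compression backend outside it.
 */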
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it's safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all the pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed for reset, so fail the open request */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
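
/*
 * A typical user-space setup through these attributes (a sketch; the
 * algorithm name assumes the corresponding compression backend is
 * available, see comp_algorithm_show()):
 *
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */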
1128
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001129/*
1130 * Allocate and initialize new zram device. the function returns
1131 * '>= 0' device_id upon success, and negative value otherwise.
1132 */
1133static int zram_add(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301134{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001135 struct zram *zram;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001136 struct request_queue *queue;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001137 int ret, device_id;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001138
1139 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
1140 if (!zram)
1141 return -ENOMEM;
1142
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001143 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001144 if (ret < 0)
1145 goto out_free_dev;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001146 device_id = ret;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301147
Jerome Marchand0900bea2011-09-06 15:02:11 +02001148 init_rwsem(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301149
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001150 queue = blk_alloc_queue(GFP_KERNEL);
1151 if (!queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +05301152 pr_err("Error allocating disk queue for device %d\n",
1153 device_id);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001154 ret = -ENOMEM;
1155 goto out_free_idr;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301156 }
1157
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001158 blk_queue_make_request(queue, zram_make_request);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301159
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001160 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301161 zram->disk = alloc_disk(1);
1162 if (!zram->disk) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001163 pr_err("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +05301164 device_id);
Julia Lawall201c7b72015-04-15 16:16:27 -07001165 ret = -ENOMEM;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001166 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301167 }
1168
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301169 zram->disk->major = zram_major;
1170 zram->disk->first_minor = device_id;
1171 zram->disk->fops = &zram_devops;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001172 zram->disk->queue = queue;
1173 zram->disk->queue->queuedata = zram;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301174 zram->disk->private_data = zram;
1175 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301176
Nitin Gupta33863c22010-08-09 22:56:47 +05301177 /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301178 set_capacity(zram->disk, 0);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001179 /* zram devices sort of resembles non-rotational disks */
1180 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
Mike Snitzerb277da02014-10-04 10:55:32 -06001181 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +05301182 /*
1183 * To ensure that we always get PAGE_SIZE aligned
1184 * and n*PAGE_SIZED sized I/O requests.
1185 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301186 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -06001187 blk_queue_logical_block_size(zram->disk->queue,
1188 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301189 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1190 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001191 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
Johannes Thumshirn0bc31532017-03-06 11:23:35 +01001192 zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
1193 zram->disk->queue->limits.chunk_sectors = 0;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06001194 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001195 /*
1196	 * zram_bio_discard() will clear all logical blocks if the logical
1197	 * block size is identical to the physical block size (PAGE_SIZE).
1198	 * But if they differ, we skip discarding the parts of logical
1199	 * blocks that fall in the unaligned portions of the request
1200	 * range, so we can't guarantee that all discarded logical blocks
1201	 * are zeroed.
1202 */
1203 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1204 zram->disk->queue->limits.discard_zeroes_data = 1;
1205 else
1206 zram->disk->queue->limits.discard_zeroes_data = 0;
1207 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
Nitin Gupta5d83d5a2010-01-28 21:13:39 +05301208
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301209 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301210
Nitin Gupta33863c22010-08-09 22:56:47 +05301211 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1212 &zram_disk_attr_group);
1213 if (ret < 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001214 pr_err("Error creating sysfs group for device %d\n",
1215 device_id);
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001216 goto out_free_disk;
Nitin Gupta33863c22010-08-09 22:56:47 +05301217 }
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001218 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -07001219 zram->meta = NULL;
Sergey Senozhatskyd12b63c2015-06-25 15:00:14 -07001220
1221 pr_info("Added device: %s\n", zram->disk->disk_name);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001222 return device_id;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301223
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001224out_free_disk:
1225 del_gendisk(zram->disk);
1226 put_disk(zram->disk);
1227out_free_queue:
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001228 blk_cleanup_queue(queue);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001229out_free_idr:
1230 idr_remove(&zram_index_idr, device_id);
1231out_free_dev:
1232 kfree(zram);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301233 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301234}
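
/*
 * Illustrative sketch (not part of the driver): how userspace typically
 * brings up a device created by zram_add(). The sysfs path and the
 * write-a-size-to-disksize protocol follow Documentation/blockdev/zram.txt;
 * disksize_store() parses the value with memparse(), so suffixes such as
 * "1G" are accepted. Error handling is trimmed for brevity.
 */
#if 0	/* example only, never compiled with the driver */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int zram_set_disksize(int dev_id, const char *size)
{
	char path[64];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/block/zram%d/disksize", dev_id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* e.g. size = "1G" or "536870912" */
	if (write(fd, size, strlen(size)) < 0)
		ret = -1;
	close(fd);
	return ret;
}
#endif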
1235
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001236static int zram_remove(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301237{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001238 struct block_device *bdev;
1239
1240 bdev = bdget_disk(zram->disk, 0);
1241 if (!bdev)
1242 return -ENOMEM;
1243
1244 mutex_lock(&bdev->bd_mutex);
1245 if (bdev->bd_openers || zram->claim) {
1246 mutex_unlock(&bdev->bd_mutex);
1247 bdput(bdev);
1248 return -EBUSY;
1249 }
1250
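	/*
	 * Once ->claim is set, zram_open() fails with -EBUSY, so no new
	 * opener can race with the reset and teardown below.
	 */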
1251 zram->claim = true;
1252 mutex_unlock(&bdev->bd_mutex);
1253
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001254 /*
1255	 * Remove sysfs first, so no one can perform a disksize
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001256	 * store while we destroy the device. This also helps during
1257	 * hot_remove -- zram_reset_device() is the last holder of
1258	 * ->init_lock, so no later/concurrent disksize_store() or
1259	 * other sysfs handler is possible.
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001260 */
1261 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1262 &zram_disk_attr_group);
Nitin Gupta33863c22010-08-09 22:56:47 +05301263
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001264	/* Make sure all pending I/O is finished */
1265 fsync_bdev(bdev);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001266 zram_reset_device(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001267 bdput(bdev);
1268
1269 pr_info("Removed device: %s\n", zram->disk->disk_name);
1270
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001271 blk_cleanup_queue(zram->disk->queue);
1272 del_gendisk(zram->disk);
1273 put_disk(zram->disk);
1274 kfree(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001275 return 0;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001276}
Nitin Gupta306b0c92009-09-22 10:26:53 +05301277
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001278/* zram-control sysfs attributes */
1279static ssize_t hot_add_show(struct class *class,
1280 struct class_attribute *attr,
1281 char *buf)
1282{
1283 int ret;
1284
1285 mutex_lock(&zram_index_mutex);
1286 ret = zram_add();
1287 mutex_unlock(&zram_index_mutex);
1288
1289 if (ret < 0)
1290 return ret;
1291 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
1292}
1293
1294static ssize_t hot_remove_store(struct class *class,
1295 struct class_attribute *attr,
1296 const char *buf,
1297 size_t count)
1298{
1299 struct zram *zram;
1300 int ret, dev_id;
1301
1302 /* dev_id is gendisk->first_minor, which is `int' */
1303 ret = kstrtoint(buf, 10, &dev_id);
1304 if (ret)
1305 return ret;
1306 if (dev_id < 0)
1307 return -EINVAL;
1308
1309 mutex_lock(&zram_index_mutex);
1310
1311 zram = idr_find(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001312 if (zram) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001313 ret = zram_remove(zram);
Takashi Iwai529e71e2016-11-30 15:54:08 -08001314 if (!ret)
1315 idr_remove(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001316 } else {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001317 ret = -ENODEV;
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001318 }
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001319
1320 mutex_unlock(&zram_index_mutex);
1321 return ret ? ret : count;
1322}
1323
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08001324/*
1325	 * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
1326	 * that reading from this file does alter the state of your system -- it
1327	 * creates a new un-initialized zram device and returns that device's
1328	 * device_id (or an error code if it fails to create a new device).
1329 */
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001330static struct class_attribute zram_control_class_attrs[] = {
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08001331 __ATTR(hot_add, 0400, hot_add_show, NULL),
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001332 __ATTR_WO(hot_remove),
1333 __ATTR_NULL,
1334};
1335
1336static struct class zram_control_class = {
1337 .name = "zram-control",
1338 .owner = THIS_MODULE,
1339 .class_attrs = zram_control_class_attrs,
1340};
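
/*
 * Illustrative sketch (not part of the driver): driving the zram-control
 * interface from userspace. Reading hot_add returns the id of a newly
 * created device; writing an id to hot_remove deletes that device.
 * Error handling is abbreviated.
 */
#if 0	/* example only, never compiled with the driver */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int zram_hot_add(void)
{
	char buf[16] = "";
	int fd;

	fd = open("/sys/class/zram-control/hot_add", O_RDONLY);
	if (fd < 0)
		return -1;
	if (read(fd, buf, sizeof(buf) - 1) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return atoi(buf);	/* hot_add_show() formatted "%d\n" */
}

static int zram_hot_remove(int dev_id)
{
	char buf[16];
	int fd, ret = 0;

	snprintf(buf, sizeof(buf), "%d", dev_id);
	fd = open("/sys/class/zram-control/hot_remove", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, buf, strlen(buf)) < 0)
		ret = -1;
	close(fd);
	return ret;
}
#endif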
1341
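/* idr_for_each() callback: tears down every device left in the idr */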
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001342static int zram_remove_cb(int id, void *ptr, void *data)
1343{
1344 zram_remove(ptr);
1345 return 0;
1346}
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001347
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001348static void destroy_devices(void)
1349{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001350 class_unregister(&zram_control_class);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001351 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
1352 idr_destroy(&zram_index_idr);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001353 unregister_blkdev(zram_major, "zram");
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001354 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301355}
1356
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301357static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301358{
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001359 int ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301360
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001361 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
1362 zcomp_cpu_up_prepare, zcomp_cpu_dead);
1363 if (ret < 0)
1364 return ret;
1365
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001366 ret = class_register(&zram_control_class);
1367 if (ret) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001368 pr_err("Unable to register zram-control class\n");
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001369 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001370 return ret;
1371 }
1372
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301373 zram_major = register_blkdev(0, "zram");
1374 if (zram_major <= 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001375 pr_err("Unable to get major number\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001376 class_unregister(&zram_control_class);
Anna-Maria Gleixner1dd6c832016-11-27 00:13:46 +01001377 cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001378 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301379 }
1380
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001381 while (num_devices != 0) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001382 mutex_lock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001383 ret = zram_add();
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001384 mutex_unlock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001385 if (ret < 0)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001386 goto out_error;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001387 num_devices--;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301388 }
1389
Nitin Gupta306b0c92009-09-22 10:26:53 +05301390 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301391
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001392out_error:
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001393 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301394 return ret;
1395}
1396
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301397static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301398{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001399 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301400}
1401
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301402module_init(zram_init);
1403module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301404
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001405module_param(num_devices, uint, 0);
Sergey Senozhatskyc3cdb402015-06-25 15:00:11 -07001406MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
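/* e.g. "modprobe zram num_devices=4" pre-creates /dev/zram0 .. /dev/zram3 */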
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001407
Nitin Gupta306b0c92009-09-22 10:26:53 +05301408MODULE_LICENSE("Dual BSD/GPL");
1409MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301410MODULE_DESCRIPTION("Compressed RAM Block Device");