/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
			struct device_attribute *attr, char *b)		\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

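/*
 * Each table entry packs the zram_pageflags and the compressed object
 * size into a single "value" word. A sketch of the assumed layout (the
 * actual shift is ZRAM_FLAG_SHIFT, defined in zram_drv.h):
 *
 *	bits [BITS_PER_LONG-1 .. ZRAM_FLAG_SHIFT]	page flags
 *	bits [ZRAM_FLAG_SHIFT-1 .. 0]			object size
 *
 * The flag helpers below touch only the high bits, while
 * zram_get/set_obj_size() mask or rewrite only the low size bits.
 */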
/* flag operations require the table entry's bit_spin_lock() to be held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

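/*
 * A bio_vec is "partial" when it does not cover a whole PAGE_SIZE page.
 * Reads then decompress into a bounce buffer and copy out only the
 * requested bytes; writes must first read-modify-write the full page
 * (see zram_bvec_write()).
 */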
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() clears BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info.capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
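/*
 * For instance, assuming 512-byte sectors and a 4096-byte zram logical
 * block (so ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8), a request starting at
 * sector 3, or whose size is not a multiple of 4096 bytes, is rejected
 * before it can reach the page-based read/write paths.
 */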
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

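/*
 * Lock-free high-watermark update: if a racing writer has already
 * published a larger value, atomic_long_cmpxchg() fails and the loop
 * re-reads the current maximum, retrying until either the swap
 * succeeds or "pages" is no longer above the stored maximum.
 */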
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static bool page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return false;
	}

	return true;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	zs_compact(meta->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

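/*
 * mm_stat emits seven columns, in the order built below: original
 * (uncompressed) data size, compressed data size, total memory used by
 * the pool, the configured memory limit and the peak memory usage (the
 * first five as byte values, page-shifted), then the count of
 * zero-filled pages and the number of pages freed by compaction.
 */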
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->meta->mem_pool);
		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

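/*
 * zram->refcount gates all I/O against reset: each request path takes
 * a reference via zram_meta_get() and drops it when done, while
 * zram_reset_device() drops the initial reference and then waits on
 * io_done for the count to reach zero before freeing the metadata.
 */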
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(pool_name);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	unsigned int size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		memcpy(mem, cmem, PAGE_SIZE);
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		ret = zcomp_decompress(zstrm, cmem, size, mem);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

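/*
 * Read path: zero-filled and unallocated slots are served without
 * touching the allocator, full-page reads decompress straight into the
 * caller's (kmapped) page, and partial reads go through a GFP_NOIO
 * bounce buffer from which only the requested range is copied.
 */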
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_err("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

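/*
 * Write path overview: partial writes first read back the old page
 * (read-modify-write), zero-filled pages are recorded with ZRAM_ZERO
 * instead of being stored, and pages that compress poorly (clen above
 * max_zpage_size) are stored uncompressed at PAGE_SIZE.
 */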
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret = 0;
	unsigned int clen;
	unsigned long handle = 0;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

compress_again:
	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
				bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	zstrm = zcomp_stream_get(zram->comp);
	ret = zcomp_compress(zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(meta->mem_pool, clen,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		zstrm = NULL;

		atomic64_inc(&zram->stats.writestall);

		handle = zs_malloc(meta->mem_pool, clen,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;

		pr_err("Error allocating memory for compressed page: %u, size=%u\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		memcpy(cmem, src, PAGE_SIZE);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_stream_put(zram->comp);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_stream_put(zram->comp);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
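
/*
 * Worked example for zram_bio_discard() above (assuming PAGE_SIZE ==
 * 4096): a discard of 12288 bytes starting 512 bytes into a page skips
 * the leading 3584 bytes, frees the next two whole pages (8192 bytes)
 * and ignores the trailing 512 bytes, since only fully covered pages
 * can be freed.
 */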

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

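/*
 * Example of the index/offset arithmetic below, assuming 4 KiB pages
 * and 512-byte sectors (so SECTORS_PER_PAGE_SHIFT == 3): a bio
 * starting at sector 9 maps to index 1 (the second zram page) with a
 * byte offset of (9 & 7) << 9 == 512 within that page.
 */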
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0,
					op_is_write(bio_op(bio))) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset,
					op_is_write(bio_op(bio))) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	blk_queue_split(queue, &bio, queue->bio_split);

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return BLK_QC_T_NONE;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
			struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If I/O fails, just return the error (ie, non-zero) without
	 * calling page_endio. The upper functions of rw_page (e.g.,
	 * swap_readpage, __swap_writepage) will then resubmit the I/O
	 * as a bio request, and bio->bi_end_io does the work needed to
	 * handle the error (e.g., SetPageError, set_page_dirty and
	 * extra works).
	 */
	if (err == 0)
		page_endio(page, is_write, 0);
	return err;
}

static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and the r/w handler
	 * cannot handle further I/O, so it will bail out by checking
	 * zram_meta_get().
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->disk_name, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed for reset, so fail the open request */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_compact.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001226/*
1227 * Allocate and initialize new zram device. the function returns
1228 * '>= 0' device_id upon success, and negative value otherwise.
1229 */
1230static int zram_add(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301231{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001232 struct zram *zram;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001233 struct request_queue *queue;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001234 int ret, device_id;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001235
1236 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
1237 if (!zram)
1238 return -ENOMEM;
1239
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001240 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001241 if (ret < 0)
1242 goto out_free_dev;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001243 device_id = ret;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301244
Jerome Marchand0900bea2011-09-06 15:02:11 +02001245 init_rwsem(&zram->init_lock);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301246
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001247 queue = blk_alloc_queue(GFP_KERNEL);
1248 if (!queue) {
Nitin Gupta306b0c92009-09-22 10:26:53 +05301249 pr_err("Error allocating disk queue for device %d\n",
1250 device_id);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001251 ret = -ENOMEM;
1252 goto out_free_idr;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301253 }
1254
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001255 blk_queue_make_request(queue, zram_make_request);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301256
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001257 /* gendisk structure */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301258 zram->disk = alloc_disk(1);
1259 if (!zram->disk) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001260 pr_err("Error allocating disk structure for device %d\n",
Nitin Gupta306b0c92009-09-22 10:26:53 +05301261 device_id);
Julia Lawall201c7b72015-04-15 16:16:27 -07001262 ret = -ENOMEM;
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001263 goto out_free_queue;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301264 }
1265
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301266 zram->disk->major = zram_major;
1267 zram->disk->first_minor = device_id;
1268 zram->disk->fops = &zram_devops;
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001269 zram->disk->queue = queue;
1270 zram->disk->queue->queuedata = zram;
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301271 zram->disk->private_data = zram;
1272 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301273
Vinayak Menonbb730a12015-02-25 19:43:59 +05301274 __set_bit(QUEUE_FLAG_FAST, &zram->disk->queue->queue_flags);
Nitin Gupta33863c22010-08-09 22:56:47 +05301275	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301276 set_capacity(zram->disk, 0);
Sergey Senozhatskyb67d1ec2014-04-07 15:38:09 -07001277	/* zram devices sort of resemble non-rotational disks */
1278 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
Mike Snitzerb277da02014-10-04 10:55:32 -06001279 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
Nitin Guptaa1dd52a2010-06-01 13:31:23 +05301280 /*
1281 * To ensure that we always get PAGE_SIZE aligned
 1282	 * and n*PAGE_SIZE-sized I/O requests.
1283 */
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301284 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
Robert Jennings7b19b8d2011-01-28 08:58:17 -06001285 blk_queue_logical_block_size(zram->disk->queue,
1286 ZRAM_LOGICAL_BLOCK_SIZE);
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301287 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1288 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001289 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
Jens Axboe2bb4cd52015-07-14 08:15:12 -06001290 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
Joonsoo Kimf4659d82014-04-07 15:38:24 -07001291 /*
1292 * zram_bio_discard() will clear all logical blocks if logical block
 1293	 * size is identical to the physical block size (PAGE_SIZE). But if it
 1294	 * is different, we will skip discarding the parts of logical blocks
 1295	 * that fall in portions of the request range which aren't aligned to
 1296	 * the physical block size, so we can't ensure that all discarded
 1297	 * logical blocks are zeroed.
1298 */
1299 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1300 zram->disk->queue->limits.discard_zeroes_data = 1;
1301 else
1302 zram->disk->queue->limits.discard_zeroes_data = 0;
1303 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
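	/*
	 * Editor's note: ZRAM_LOGICAL_BLOCK_SHIFT is 12, so the logical block
	 * size is 4K; on systems with 4K PAGE_SIZE the discard_zeroes_data = 1
	 * branch above is the common case.
	 */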
Nitin Gupta5d83d5a2010-01-28 21:13:39 +05301304
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301305 add_disk(zram->disk);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301306
Nitin Gupta33863c22010-08-09 22:56:47 +05301307 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1308 &zram_disk_attr_group);
1309 if (ret < 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001310 pr_err("Error creating sysfs group for device %d\n",
1311 device_id);
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001312 goto out_free_disk;
Nitin Gupta33863c22010-08-09 22:56:47 +05301313 }
Sergey Senozhatskye46b8a02014-04-07 15:38:17 -07001314 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
Sergey Senozhatskybe2d1d52014-04-07 15:38:00 -07001315 zram->meta = NULL;
Sergey Senozhatskyd12b63c2015-06-25 15:00:14 -07001316
1317 pr_info("Added device: %s\n", zram->disk->disk_name);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001318 return device_id;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301319
Jiang Liu39a9b8a2013-06-07 00:07:24 +08001320out_free_disk:
1321 del_gendisk(zram->disk);
1322 put_disk(zram->disk);
1323out_free_queue:
Sergey Senozhatskyee9801602015-02-12 15:00:48 -08001324 blk_cleanup_queue(queue);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001325out_free_idr:
1326 idr_remove(&zram_index_idr, device_id);
1327out_free_dev:
1328 kfree(zram);
Nitin Guptade1a21a2010-01-28 21:13:40 +05301329 return ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301330}
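/*
 * Minimal caller sketch (editor's illustration): zram_add() does not take
 * zram_index_mutex itself, so callers must serialize the idr allocation,
 * as hot_add_show() and zram_init() below do:
 *
 *	mutex_lock(&zram_index_mutex);
 *	ret = zram_add();
 *	mutex_unlock(&zram_index_mutex);
 */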
1331
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001332static int zram_remove(struct zram *zram)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301333{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001334 struct block_device *bdev;
1335
1336 bdev = bdget_disk(zram->disk, 0);
1337 if (!bdev)
1338 return -ENOMEM;
1339
1340 mutex_lock(&bdev->bd_mutex);
1341 if (bdev->bd_openers || zram->claim) {
1342 mutex_unlock(&bdev->bd_mutex);
1343 bdput(bdev);
1344 return -EBUSY;
1345 }
1346
1347 zram->claim = true;
1348 mutex_unlock(&bdev->bd_mutex);
1349
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001350 /*
1351 * Remove sysfs first, so no one will perform a disksize
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001352	 * store while we destroy the device. This also helps during
 1353	 * hot_remove -- zram_reset_device() is the last holder of
 1354	 * ->init_lock; no later/concurrent disksize_store() or any
 1355	 * other sysfs handler is possible.
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001356 */
1357 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1358 &zram_disk_attr_group);
Nitin Gupta33863c22010-08-09 22:56:47 +05301359
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001360	/* Make sure all pending I/O is finished */
1361 fsync_bdev(bdev);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001362 zram_reset_device(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001363 bdput(bdev);
1364
1365 pr_info("Removed device: %s\n", zram->disk->disk_name);
1366
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001367 blk_cleanup_queue(zram->disk->queue);
1368 del_gendisk(zram->disk);
1369 put_disk(zram->disk);
1370 kfree(zram);
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001371 return 0;
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001372}
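/*
 * Editor's note: setting ->claim under bd_mutex above pairs with the check
 * in zram_open(); an open racing with removal now fails with -EBUSY instead
 * of seeing a half-torn-down device.
 */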
Nitin Gupta306b0c92009-09-22 10:26:53 +05301373
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001374/* zram-control sysfs attributes */
1375static ssize_t hot_add_show(struct class *class,
1376 struct class_attribute *attr,
1377 char *buf)
1378{
1379 int ret;
1380
1381 mutex_lock(&zram_index_mutex);
1382 ret = zram_add();
1383 mutex_unlock(&zram_index_mutex);
1384
1385 if (ret < 0)
1386 return ret;
1387 return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
1388}
1389
1390static ssize_t hot_remove_store(struct class *class,
1391 struct class_attribute *attr,
1392 const char *buf,
1393 size_t count)
1394{
1395 struct zram *zram;
1396 int ret, dev_id;
1397
1398 /* dev_id is gendisk->first_minor, which is `int' */
1399 ret = kstrtoint(buf, 10, &dev_id);
1400 if (ret)
1401 return ret;
1402 if (dev_id < 0)
1403 return -EINVAL;
1404
1405 mutex_lock(&zram_index_mutex);
1406
1407 zram = idr_find(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001408 if (zram) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001409 ret = zram_remove(zram);
Takashi Iwai529e71e2016-11-30 15:54:08 -08001410 if (!ret)
1411 idr_remove(&zram_index_idr, dev_id);
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001412 } else {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001413 ret = -ENODEV;
Jerome Marchand17ec4cd2016-01-15 16:54:48 -08001414 }
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001415
1416 mutex_unlock(&zram_index_mutex);
1417 return ret ? ret : count;
1418}
1419
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08001420/*
 1421 * NOTE: the hot_add attribute is not the usual read-only sysfs attribute, in
 1422 * the sense that reading from this file does alter the state of your
 1423 * system -- it creates a new un-initialized zram device and returns that
 1424 * device's device_id (or an error code if it fails to create a new device).
1425 */
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001426static struct class_attribute zram_control_class_attrs[] = {
Sergey Senozhatsky5c7e9cc2016-12-07 14:44:31 -08001427 __ATTR(hot_add, 0400, hot_add_show, NULL),
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001428 __ATTR_WO(hot_remove),
1429 __ATTR_NULL,
1430};
1431
1432static struct class zram_control_class = {
1433 .name = "zram-control",
1434 .owner = THIS_MODULE,
1435 .class_attrs = zram_control_class_attrs,
1436};
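/*
 * Illustrative usage (editor's sketch): registering this class creates
 * /sys/class/zram-control/ with the two attributes above, e.g.:
 *
 *	cat /sys/class/zram-control/hot_add	# prints the new device_id
 *	echo 4 > /sys/class/zram-control/hot_remove
 */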
1437
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001438static int zram_remove_cb(int id, void *ptr, void *data)
1439{
1440 zram_remove(ptr);
1441 return 0;
1442}
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001443
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001444static void destroy_devices(void)
1445{
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001446 class_unregister(&zram_control_class);
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001447 idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
1448 idr_destroy(&zram_index_idr);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001449 unregister_blkdev(zram_major, "zram");
Nitin Gupta306b0c92009-09-22 10:26:53 +05301450}
1451
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301452static int __init zram_init(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301453{
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001454 int ret;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301455
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001456 ret = class_register(&zram_control_class);
1457 if (ret) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001458 pr_err("Unable to register zram-control class\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001459 return ret;
1460 }
1461
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301462 zram_major = register_blkdev(0, "zram");
1463 if (zram_major <= 0) {
Sergey Senozhatsky70864962015-09-08 15:04:58 -07001464 pr_err("Unable to get major number\n");
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001465 class_unregister(&zram_control_class);
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001466 return -EBUSY;
Nitin Gupta306b0c92009-09-22 10:26:53 +05301467 }
1468
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001469 while (num_devices != 0) {
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001470 mutex_lock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001471 ret = zram_add();
Sergey Senozhatsky6566d1a2015-06-25 15:00:24 -07001472 mutex_unlock(&zram_index_mutex);
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001473 if (ret < 0)
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001474 goto out_error;
Sergey Senozhatsky92ff1522015-06-25 15:00:19 -07001475 num_devices--;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301476 }
1477
Nitin Gupta306b0c92009-09-22 10:26:53 +05301478 return 0;
Nitin Guptade1a21a2010-01-28 21:13:40 +05301479
Sergey Senozhatskya096caf2015-02-12 15:00:39 -08001480out_error:
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001481 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301482 return ret;
1483}
1484
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301485static void __exit zram_exit(void)
Nitin Gupta306b0c92009-09-22 10:26:53 +05301486{
Sergey Senozhatsky85508ec2015-06-25 15:00:06 -07001487 destroy_devices();
Nitin Gupta306b0c92009-09-22 10:26:53 +05301488}
1489
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301490module_init(zram_init);
1491module_exit(zram_exit);
Nitin Gupta306b0c92009-09-22 10:26:53 +05301492
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001493module_param(num_devices, uint, 0);
Sergey Senozhatskyc3cdb402015-06-25 15:00:11 -07001494MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
Sergey Senozhatsky9b3bb7a2013-06-22 03:21:18 +03001495
Nitin Gupta306b0c92009-09-22 10:26:53 +05301496MODULE_LICENSE("Dual BSD/GPL");
1497MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
Nitin Guptaf1e3cff2010-06-01 13:31:25 +05301498MODULE_DESCRIPTION("Compressed RAM Block Device");