/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}

#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
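
/*
 * Illustrative note (not in the original source): for example,
 * ZRAM_ATTR_RO(num_reads) expands to a num_reads_show() callback that
 * prints zram->stats.num_reads, plus DEVICE_ATTR_RO(num_reads), which
 * defines the dev_attr_num_reads attribute backing the
 * /sys/block/zram<id>/num_reads file.
 */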

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require the table entry's bit_spin_lock() to be held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
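
/*
 * Illustrative note (not in the original source): table[index].value
 * packs two fields into one word. Bits [0, ZRAM_FLAG_SHIFT) hold the
 * compressed object size; bits at and above ZRAM_FLAG_SHIFT hold the
 * ZRAM_* page flags:
 *
 *	value = (flags << ZRAM_FLAG_SHIFT) | obj_size;
 *
 * which is why zram_get_obj_size() masks with BIT(ZRAM_FLAG_SHIFT) - 1.
 */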

static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info.capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
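
/*
 * Illustrative example (not in the original source), assuming the usual
 * ZRAM_LOGICAL_BLOCK_SIZE of 4096 bytes and 512-byte sectors (so
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a request starting at sector 8
 * with size 4096 passes both alignment checks, while one starting at
 * sector 9, or sized 2048, is rejected as unaligned.
 */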

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
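
/*
 * Illustrative example (not in the original source), assuming 4KB pages:
 * with *offset == 3072 and a 2048-byte bvec, 3072 + 2048 >= 4096, so the
 * page index advances by one and the new offset becomes
 * (3072 + 2048) % 4096 == 1024, i.e. the I/O continues 1KB into the
 * next zram page.
 */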

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
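
/*
 * Illustrative note (not in the original source): update_used_max()
 * above is a lockless maximum update. atomic_long_cmpxchg() installs
 * the new value only if max_used_pages still equals the snapshot in
 * cur_max; if another CPU changed it in the meantime, the returned
 * old_max differs from cur_max and the loop retries against the fresh
 * value. If pages is not larger than the current maximum, old_max and
 * cur_max stay equal and the loop exits without writing.
 */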

static bool page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return false;
	}

	return true;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
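
/*
 * Illustrative usage (not in the original source): memparse() accepts
 * the usual K/M/G suffixes, so the limit can be set from user space
 * with, e.g.:
 *
 *	echo 512M > /sys/block/zram0/mem_limit
 *
 * Writing 0 disables the limit: limit_pages becomes 0, and the check
 * in the write path only applies when zram->limit_pages is non-zero.
 */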

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
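
/*
 * Illustrative usage (not in the original source): only "0" is accepted
 * as input, which resets the watermark to the pool's current size, e.g.:
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 *	cat /sys/block/zram0/mem_used_max
 */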

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our two-year
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[CRYPTO_MAX_ALG_NAME];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strlcpy(zram->compressor, compressor, sizeof(compressor));
	up_write(&zram->init_lock);
	return len;
}
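
/*
 * Illustrative usage (not in the original source): the algorithm can
 * only be changed before the device is initialized, i.e. before
 * disksize is set, e.g.:
 *
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 */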

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	zs_compact(meta->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
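
/*
 * Illustrative usage (not in the original source): any write to the
 * attribute triggers zsmalloc pool compaction on an initialized
 * device, e.g.:
 *
 *	echo 1 > /sys/block/zram0/compact
 *
 * The cumulative number of pages freed is reported via the
 * pages_compacted column of mm_stat below.
 */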

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->meta->mem_pool);
		zs_pool_stats(zram->meta->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	meta->mem_pool = zs_create_pool(pool_name);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	unsigned int size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		memcpy(mem, cmem, PAGE_SIZE);
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		ret = zcomp_decompress(zstrm, cmem, size, mem);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_err("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	unsigned int clen;
	unsigned long handle = 0;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm = NULL;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

compress_again:
	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
				bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	zstrm = zcomp_stream_get(zram->comp);
	ret = zcomp_compress(zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *  since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *  put per-cpu compression stream and, thus, to re-do
	 *  the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(meta->mem_pool, clen,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		zstrm = NULL;

		atomic64_inc(&zram->stats.writestall);

		handle = zs_malloc(meta->mem_pool, clen,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;

		pr_err("Error allocating memory for compressed page: %u, size=%u\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		memcpy(cmem, src, PAGE_SIZE);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_stream_put(zram->comp);
	zstrm = NULL;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (zstrm)
		zcomp_stream_put(zram->comp);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we can handle this
	 * request by reading that physical block and decompressing and
	 * partially zeroing and re-compressing and then re-storing it, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
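
/*
 * Illustrative example (not in the original source), assuming 4KB pages:
 * a discard of 12KB starting 1KB into page `index` first drops the 3KB
 * that only partially covers that page (n becomes 9KB, index advances),
 * then frees the two fully covered pages and stops with the trailing
 * 1KB untouched; only whole zram pages are ever freed.
 */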
| 853 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 854 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 855 | int offset, bool is_write) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 856 | { |
| 857 | unsigned long start_time = jiffies; |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 858 | int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 859 | int ret; |
| 860 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 861 | generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT, |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 862 | &zram->disk->part0); |
| 863 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 864 | if (!is_write) { |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 865 | atomic64_inc(&zram->stats.num_reads); |
| 866 | ret = zram_bvec_read(zram, bvec, index, offset); |
| 867 | } else { |
| 868 | atomic64_inc(&zram->stats.num_writes); |
| 869 | ret = zram_bvec_write(zram, bvec, index, offset); |
| 870 | } |
| 871 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 872 | generic_end_io_acct(rw_acct, &zram->disk->part0, start_time); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 873 | |
| 874 | if (unlikely(ret)) { |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 875 | if (!is_write) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 876 | atomic64_inc(&zram->stats.failed_reads); |
| 877 | else |
| 878 | atomic64_inc(&zram->stats.failed_writes); |
| 879 | } |
| 880 | |
| 881 | return ret; |
| 882 | } |
| 883 | |
| 884 | static void __zram_make_request(struct zram *zram, struct bio *bio) |
| 885 | { |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 886 | int offset; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 887 | u32 index; |
| 888 | struct bio_vec bvec; |
| 889 | struct bvec_iter iter; |
| 890 | |
| 891 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
| 892 | offset = (bio->bi_iter.bi_sector & |
| 893 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
| 894 | |
Mike Christie | 95fe6c1 | 2016-06-05 14:31:48 -0500 | [diff] [blame] | 895 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 896 | zram_bio_discard(zram, index, offset, bio); |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 897 | bio_endio(bio); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 898 | return; |
| 899 | } |
| 900 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 901 | bio_for_each_segment(bvec, bio, iter) { |
| 902 | int max_transfer_size = PAGE_SIZE - offset; |
| 903 | |
| 904 | if (bvec.bv_len > max_transfer_size) { |
| 905 | /* |
| 906 | * zram_bvec_rw() can only make operation on a single |
| 907 | * zram page. Split the bio vector. |
| 908 | */ |
| 909 | struct bio_vec bv; |
| 910 | |
| 911 | bv.bv_page = bvec.bv_page; |
| 912 | bv.bv_len = max_transfer_size; |
| 913 | bv.bv_offset = bvec.bv_offset; |
| 914 | |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 915 | if (zram_bvec_rw(zram, &bv, index, offset, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 916 | op_is_write(bio_op(bio))) < 0) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 917 | goto out; |
| 918 | |
| 919 | bv.bv_len = bvec.bv_len - max_transfer_size; |
| 920 | bv.bv_offset += max_transfer_size; |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 921 | if (zram_bvec_rw(zram, &bv, index + 1, 0, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 922 | op_is_write(bio_op(bio))) < 0) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 923 | goto out; |
| 924 | } else |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 925 | if (zram_bvec_rw(zram, &bvec, index, offset, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 926 | op_is_write(bio_op(bio))) < 0) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 927 | goto out; |
| 928 | |
| 929 | update_position(&index, &offset, &bvec); |
| 930 | } |
| 931 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 932 | bio_endio(bio); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 933 | return; |
| 934 | |
| 935 | out: |
| 936 | bio_io_error(bio); |
| 937 | } |
| 938 | |
| 939 | /* |
| 940 | * Handler function for all zram I/O requests. |
| 941 | */ |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 942 | static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 943 | { |
| 944 | struct zram *zram = queue->queuedata; |
| 945 | |
| 946 | if (unlikely(!zram_meta_get(zram))) |
| 947 | goto error; |
| 948 | |
Kent Overstreet | 54efd50 | 2015-04-23 22:37:18 -0700 | [diff] [blame] | 949 | blk_queue_split(queue, &bio, queue->bio_split); |
| 950 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 951 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
| 952 | bio->bi_iter.bi_size)) { |
| 953 | atomic64_inc(&zram->stats.invalid_io); |
| 954 | goto put_zram; |
| 955 | } |
| 956 | |
| 957 | __zram_make_request(zram, bio); |
| 958 | zram_meta_put(zram); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 959 | return BLK_QC_T_NONE; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 960 | put_zram: |
| 961 | zram_meta_put(zram); |
| 962 | error: |
| 963 | bio_io_error(bio); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 964 | return BLK_QC_T_NONE; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 965 | } |
| 966 | |
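| | /* |
| | * Called through block_device_operations->swap_slot_free_notify |
| | * when the swap code frees a swap slot, so zram can drop the |
| | * compressed copy of that page right away. |
| | */ |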
| 967 | static void zram_slot_free_notify(struct block_device *bdev, |
| 968 | unsigned long index) |
| 969 | { |
| 970 | struct zram *zram; |
| 971 | struct zram_meta *meta; |
| 972 | |
| 973 | zram = bdev->bd_disk->private_data; |
| 974 | meta = zram->meta; |
| 975 | |
| 976 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
| 977 | zram_free_page(zram, index); |
| 978 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
| 979 | atomic64_inc(&zram->stats.notify_free); |
| 980 | } |
| 981 | |
| 982 | static int zram_rw_page(struct block_device *bdev, sector_t sector, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 983 | struct page *page, bool is_write) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 984 | { |
| 985 | int offset, err = -EIO; |
| 986 | u32 index; |
| 987 | struct zram *zram; |
| 988 | struct bio_vec bv; |
| 989 | |
| 990 | zram = bdev->bd_disk->private_data; |
| 991 | if (unlikely(!zram_meta_get(zram))) |
| 992 | goto out; |
| 993 | |
| 994 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { |
| 995 | atomic64_inc(&zram->stats.invalid_io); |
| 996 | err = -EINVAL; |
| 997 | goto put_zram; |
| 998 | } |
| 999 | |
| 1000 | index = sector >> SECTORS_PER_PAGE_SHIFT; |
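| | /* '<<' binds tighter than '&': mask the in-page sectors first */ |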
| 1001 | offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
| 1002 | |
| 1003 | bv.bv_page = page; |
| 1004 | bv.bv_len = PAGE_SIZE; |
| 1005 | bv.bv_offset = 0; |
| 1006 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1007 | err = zram_bvec_rw(zram, &bv, index, offset, is_write); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1008 | put_zram: |
| 1009 | zram_meta_put(zram); |
| 1010 | out: |
| 1011 | /* |
| 1012 | * If I/O fails, just return the error (i.e. non-zero) without |
| 1013 | * calling page_endio. |
| 1014 | * The upper layers of rw_page (e.g. swap_readpage, |
| 1015 | * __swap_writepage) will then resubmit the I/O as a bio request, |
| 1016 | * and bio->bi_end_io does the error handling |
| 1017 | * (e.g. SetPageError, set_page_dirty and other cleanup). |
| 1018 | */ |
| 1019 | if (err == 0) |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1020 | page_endio(page, is_write, 0); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1021 | return err; |
| 1022 | } |
| 1023 | |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1024 | static void zram_reset_device(struct zram *zram) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1025 | { |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1026 | struct zram_meta *meta; |
| 1027 | struct zcomp *comp; |
| 1028 | u64 disksize; |
| 1029 | |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1030 | down_write(&zram->init_lock); |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1031 | |
| 1032 | zram->limit_pages = 0; |
| 1033 | |
Sergey Senozhatsky | be2d1d5 | 2014-04-07 15:38:00 -0700 | [diff] [blame] | 1034 | if (!init_done(zram)) { |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1035 | up_write(&zram->init_lock); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1036 | return; |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1037 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1038 | |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1039 | meta = zram->meta; |
| 1040 | comp = zram->comp; |
| 1041 | disksize = zram->disksize; |
| 1042 | /* |
| 1043 | * The refcount will eventually drop to 0; the r/w handler |
| 1044 | * cannot accept further I/O and will bail out when it |
| 1045 | * checks zram_meta_get(). |
| 1046 | */ |
| 1047 | zram_meta_put(zram); |
| 1048 | /* |
| 1049 | * We want to free zram_meta in process context to avoid |
| 1050 | * a deadlock between the reclaim path and other locks. |
| 1051 | */ |
| 1052 | wait_event(zram->io_done, atomic_read(&zram->refcount) == 0); |
| 1053 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1054 | /* Reset stats */ |
| 1055 | memset(&zram->stats, 0, sizeof(zram->stats)); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1056 | zram->disksize = 0; |
Weijie Yang | d7ad41a | 2015-06-10 11:14:49 -0700 | [diff] [blame] | 1057 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1058 | set_capacity(zram->disk, 0); |
Weijie Yang | d7ad41a | 2015-06-10 11:14:49 -0700 | [diff] [blame] | 1059 | part_stat_set_all(&zram->disk->part0, 0); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1060 | |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1061 | up_write(&zram->init_lock); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1062 | /* I/O operations on all CPUs are done, so it is safe to free */ |
| 1063 | zram_meta_free(meta, disksize); |
| 1064 | zcomp_destroy(comp); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1065 | } |
| 1066 | |
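| | /* |
| | * Typical usage (the size is parsed by memparse(), so suffixes |
| | * like K/M/G work): echo 256M > /sys/block/zram0/disksize |
| | */ |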
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1067 | static ssize_t disksize_store(struct device *dev, |
| 1068 | struct device_attribute *attr, const char *buf, size_t len) |
| 1069 | { |
| 1070 | u64 disksize; |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1071 | struct zcomp *comp; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1072 | struct zram_meta *meta; |
| 1073 | struct zram *zram = dev_to_zram(dev); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1074 | int err; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1075 | |
| 1076 | disksize = memparse(buf, NULL); |
| 1077 | if (!disksize) |
| 1078 | return -EINVAL; |
| 1079 | |
| 1080 | disksize = PAGE_ALIGN(disksize); |
Sergey Senozhatsky | 4ce321f | 2015-08-14 15:35:19 -0700 | [diff] [blame] | 1081 | meta = zram_meta_alloc(zram->disk->disk_name, disksize); |
Minchan Kim | db5d711 | 2014-03-03 15:38:34 -0800 | [diff] [blame] | 1082 | if (!meta) |
| 1083 | return -ENOMEM; |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1084 | |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1085 | comp = zcomp_create(zram->compressor); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1086 | if (IS_ERR(comp)) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1087 | pr_err("Cannot initialise %s compressing backend\n", |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1088 | zram->compressor); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1089 | err = PTR_ERR(comp); |
| 1090 | goto out_free_meta; |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1091 | } |
| 1092 | |
| 1093 | down_write(&zram->init_lock); |
| 1094 | if (init_done(zram)) { |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1095 | pr_info("Cannot change disksize for initialized device\n"); |
| 1096 | err = -EBUSY; |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1097 | goto out_destroy_comp; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1098 | } |
| 1099 | |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1100 | init_waitqueue_head(&zram->io_done); |
| 1101 | atomic_set(&zram->refcount, 1); |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1102 | zram->meta = meta; |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1103 | zram->comp = comp; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1104 | zram->disksize = disksize; |
| 1105 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); |
Minchan Kim | 2e264fb | 2017-01-10 16:58:21 -0800 | [diff] [blame] | 1106 | zram_revalidate_disk(zram); |
Minchan Kim | ad4764b | 2017-01-10 16:58:18 -0800 | [diff] [blame] | 1107 | up_write(&zram->init_lock); |
Minchan Kim | b4c5c60 | 2014-07-23 14:00:04 -0700 | [diff] [blame] | 1108 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1109 | return len; |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1110 | |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1111 | out_destroy_comp: |
| 1112 | up_write(&zram->init_lock); |
| 1113 | zcomp_destroy(comp); |
| 1114 | out_free_meta: |
Ganesh Mahendran | 1fec117 | 2015-02-12 15:00:33 -0800 | [diff] [blame] | 1115 | zram_meta_free(meta, disksize); |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1116 | return err; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1117 | } |
| 1118 | |
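| | /* |
| | * Writing any non-zero value resets the device, e.g.: |
| | * echo 1 > /sys/block/zram0/reset |
| | * The write fails with -EBUSY while the device is open or claimed. |
| | */ |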
| 1119 | static ssize_t reset_store(struct device *dev, |
| 1120 | struct device_attribute *attr, const char *buf, size_t len) |
| 1121 | { |
| 1122 | int ret; |
| 1123 | unsigned short do_reset; |
| 1124 | struct zram *zram; |
| 1125 | struct block_device *bdev; |
| 1126 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1127 | ret = kstrtou16(buf, 10, &do_reset); |
| 1128 | if (ret) |
| 1129 | return ret; |
| 1130 | |
| 1131 | if (!do_reset) |
| 1132 | return -EINVAL; |
| 1133 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1134 | zram = dev_to_zram(dev); |
| 1135 | bdev = bdget_disk(zram->disk, 0); |
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 1136 | if (!bdev) |
| 1137 | return -ENOMEM; |
| 1138 | |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1139 | mutex_lock(&bdev->bd_mutex); |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1140 | /* Do not reset an active or claimed device */ |
| 1141 | if (bdev->bd_openers || zram->claim) { |
| 1142 | mutex_unlock(&bdev->bd_mutex); |
| 1143 | bdput(bdev); |
| 1144 | return -EBUSY; |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1145 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1146 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1147 | /* From now on, no one can open /dev/zram[0-9] */ |
| 1148 | zram->claim = true; |
| 1149 | mutex_unlock(&bdev->bd_mutex); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1150 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1151 | /* Make sure all pending I/O is finished */ |
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 1152 | fsync_bdev(bdev); |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1153 | zram_reset_device(zram); |
Minchan Kim | 2e264fb | 2017-01-10 16:58:21 -0800 | [diff] [blame] | 1154 | zram_revalidate_disk(zram); |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1155 | bdput(bdev); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1156 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1157 | mutex_lock(&bdev->bd_mutex); |
| 1158 | zram->claim = false; |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1159 | mutex_unlock(&bdev->bd_mutex); |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1160 | |
| 1161 | return len; |
| 1162 | } |
| 1163 | |
| 1164 | static int zram_open(struct block_device *bdev, fmode_t mode) |
| 1165 | { |
| 1166 | int ret = 0; |
| 1167 | struct zram *zram; |
| 1168 | |
| 1169 | WARN_ON(!mutex_is_locked(&bdev->bd_mutex)); |
| 1170 | |
| 1171 | zram = bdev->bd_disk->private_data; |
| 1172 | /* zram was claimed for reset, so fail the open request */ |
| 1173 | if (zram->claim) |
| 1174 | ret = -EBUSY; |
| 1175 | |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1176 | return ret; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1177 | } |
| 1178 | |
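| | /* |
| | * rw_page gives upper layers (e.g. swap_readpage/__swap_writepage) |
| | * a synchronous, bio-less way to read or write a single page; |
| | * regular bios still go through zram_make_request. |
| | */ |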
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1179 | static const struct block_device_operations zram_devops = { |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1180 | .open = zram_open, |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1181 | .swap_slot_free_notify = zram_slot_free_notify, |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1182 | .rw_page = zram_rw_page, |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 1183 | .owner = THIS_MODULE |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1184 | }; |
| 1185 | |
Andrew Morton | 99ebbd30 | 2015-05-05 16:23:25 -0700 | [diff] [blame] | 1186 | static DEVICE_ATTR_WO(compact); |
Ganesh Mahendran | 083914e | 2014-12-12 16:57:13 -0800 | [diff] [blame] | 1187 | static DEVICE_ATTR_RW(disksize); |
| 1188 | static DEVICE_ATTR_RO(initstate); |
| 1189 | static DEVICE_ATTR_WO(reset); |
| 1190 | static DEVICE_ATTR_RO(orig_data_size); |
| 1191 | static DEVICE_ATTR_RO(mem_used_total); |
| 1192 | static DEVICE_ATTR_RW(mem_limit); |
| 1193 | static DEVICE_ATTR_RW(mem_used_max); |
| 1194 | static DEVICE_ATTR_RW(max_comp_streams); |
| 1195 | static DEVICE_ATTR_RW(comp_algorithm); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1196 | |
| 1197 | static struct attribute *zram_disk_attrs[] = { |
| 1198 | &dev_attr_disksize.attr, |
| 1199 | &dev_attr_initstate.attr, |
| 1200 | &dev_attr_reset.attr, |
| 1201 | &dev_attr_num_reads.attr, |
| 1202 | &dev_attr_num_writes.attr, |
Sergey Senozhatsky | 6444724 | 2014-04-07 15:38:05 -0700 | [diff] [blame] | 1203 | &dev_attr_failed_reads.attr, |
| 1204 | &dev_attr_failed_writes.attr, |
Andrew Morton | 99ebbd30 | 2015-05-05 16:23:25 -0700 | [diff] [blame] | 1205 | &dev_attr_compact.attr, |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1206 | &dev_attr_invalid_io.attr, |
| 1207 | &dev_attr_notify_free.attr, |
| 1208 | &dev_attr_zero_pages.attr, |
| 1209 | &dev_attr_orig_data_size.attr, |
| 1210 | &dev_attr_compr_data_size.attr, |
| 1211 | &dev_attr_mem_used_total.attr, |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1212 | &dev_attr_mem_limit.attr, |
Minchan Kim | 461a8ee | 2014-10-09 15:29:55 -0700 | [diff] [blame] | 1213 | &dev_attr_mem_used_max.attr, |
Sergey Senozhatsky | beca3ec | 2014-04-07 15:38:14 -0700 | [diff] [blame] | 1214 | &dev_attr_max_comp_streams.attr, |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1215 | &dev_attr_comp_algorithm.attr, |
Sergey Senozhatsky | 2f6a3be | 2015-04-15 16:16:03 -0700 | [diff] [blame] | 1216 | &dev_attr_io_stat.attr, |
Sergey Senozhatsky | 4f2109f | 2015-04-15 16:16:06 -0700 | [diff] [blame] | 1217 | &dev_attr_mm_stat.attr, |
Sergey Senozhatsky | 623e47f | 2016-05-20 17:00:02 -0700 | [diff] [blame] | 1218 | &dev_attr_debug_stat.attr, |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1219 | NULL, |
| 1220 | }; |
| 1221 | |
| 1222 | static struct attribute_group zram_disk_attr_group = { |
| 1223 | .attrs = zram_disk_attrs, |
| 1224 | }; |
| 1225 | |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1226 | /* |
| 1227 | * Allocate and initialize a new zram device. The function returns |
| 1228 | * a '>= 0' device_id upon success, and a negative value otherwise. |
| 1229 | */ |
| 1230 | static int zram_add(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1231 | { |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1232 | struct zram *zram; |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1233 | struct request_queue *queue; |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1234 | int ret, device_id; |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1235 | |
| 1236 | zram = kzalloc(sizeof(struct zram), GFP_KERNEL); |
| 1237 | if (!zram) |
| 1238 | return -ENOMEM; |
| 1239 | |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1240 | ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1241 | if (ret < 0) |
| 1242 | goto out_free_dev; |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1243 | device_id = ret; |
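| | /* |
| | * idr_alloc() with end == 0 picks the lowest free index >= 0; |
| | * it serves both as the device id and as the first minor. |
| | */ |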
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1244 | |
Jerome Marchand | 0900bea | 2011-09-06 15:02:11 +0200 | [diff] [blame] | 1245 | init_rwsem(&zram->init_lock); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1246 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1247 | queue = blk_alloc_queue(GFP_KERNEL); |
| 1248 | if (!queue) { |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1249 | pr_err("Error allocating disk queue for device %d\n", |
| 1250 | device_id); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1251 | ret = -ENOMEM; |
| 1252 | goto out_free_idr; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1253 | } |
| 1254 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1255 | blk_queue_make_request(queue, zram_make_request); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1256 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1257 | /* gendisk structure */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1258 | zram->disk = alloc_disk(1); |
| 1259 | if (!zram->disk) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1260 | pr_err("Error allocating disk structure for device %d\n", |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1261 | device_id); |
Julia Lawall | 201c7b7 | 2015-04-15 16:16:27 -0700 | [diff] [blame] | 1262 | ret = -ENOMEM; |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1263 | goto out_free_queue; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1264 | } |
| 1265 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1266 | zram->disk->major = zram_major; |
| 1267 | zram->disk->first_minor = device_id; |
| 1268 | zram->disk->fops = &zram_devops; |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1269 | zram->disk->queue = queue; |
| 1270 | zram->disk->queue->queuedata = zram; |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1271 | zram->disk->private_data = zram; |
| 1272 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1273 | |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1274 | /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1275 | set_capacity(zram->disk, 0); |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1276 | /* zram devices sort of resemble non-rotational disks */ |
| 1277 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); |
Mike Snitzer | b277da0 | 2014-10-04 10:55:32 -0600 | [diff] [blame] | 1278 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); |
Nitin Gupta | a1dd52a | 2010-06-01 13:31:23 +0530 | [diff] [blame] | 1279 | /* |
| 1280 | * To ensure that we always get PAGE_SIZE-aligned |
| 1281 | * and n*PAGE_SIZE-sized I/O requests. |
| 1282 | */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1283 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
Robert Jennings | 7b19b8d | 2011-01-28 08:58:17 -0600 | [diff] [blame] | 1284 | blk_queue_logical_block_size(zram->disk->queue, |
| 1285 | ZRAM_LOGICAL_BLOCK_SIZE); |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1286 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
| 1287 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1288 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
Jens Axboe | 2bb4cd5 | 2015-07-14 08:15:12 -0600 | [diff] [blame] | 1289 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1290 | /* |
| 1291 | * zram_bio_discard() will clear all logical blocks if the logical |
| 1292 | * block size is identical to the physical block size (PAGE_SIZE). |
| 1293 | * But if they differ, we skip discarding the parts of logical |
| 1294 | * blocks within the request range that aren't aligned to the |
| 1295 | * physical block size, so we can't ensure that all discarded |
| 1296 | * logical blocks are zeroed. |
| 1297 | */ |
| 1298 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) |
| 1299 | zram->disk->queue->limits.discard_zeroes_data = 1; |
| 1300 | else |
| 1301 | zram->disk->queue->limits.discard_zeroes_data = 0; |
| 1302 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); |
Nitin Gupta | 5d83d5a | 2010-01-28 21:13:39 +0530 | [diff] [blame] | 1303 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1304 | add_disk(zram->disk); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1305 | |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1306 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, |
| 1307 | &zram_disk_attr_group); |
| 1308 | if (ret < 0) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1309 | pr_err("Error creating sysfs group for device %d\n", |
| 1310 | device_id); |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1311 | goto out_free_disk; |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1312 | } |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1313 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); |
Sergey Senozhatsky | be2d1d5 | 2014-04-07 15:38:00 -0700 | [diff] [blame] | 1314 | zram->meta = NULL; |
Sergey Senozhatsky | d12b63c | 2015-06-25 15:00:14 -0700 | [diff] [blame] | 1315 | |
| 1316 | pr_info("Added device: %s\n", zram->disk->disk_name); |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1317 | return device_id; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1318 | |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1319 | out_free_disk: |
| 1320 | del_gendisk(zram->disk); |
| 1321 | put_disk(zram->disk); |
| 1322 | out_free_queue: |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1323 | blk_cleanup_queue(queue); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1324 | out_free_idr: |
| 1325 | idr_remove(&zram_index_idr, device_id); |
| 1326 | out_free_dev: |
| 1327 | kfree(zram); |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1328 | return ret; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1329 | } |
| 1330 | |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1331 | static int zram_remove(struct zram *zram) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1332 | { |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1333 | struct block_device *bdev; |
| 1334 | |
| 1335 | bdev = bdget_disk(zram->disk, 0); |
| 1336 | if (!bdev) |
| 1337 | return -ENOMEM; |
| 1338 | |
| 1339 | mutex_lock(&bdev->bd_mutex); |
| 1340 | if (bdev->bd_openers || zram->claim) { |
| 1341 | mutex_unlock(&bdev->bd_mutex); |
| 1342 | bdput(bdev); |
| 1343 | return -EBUSY; |
| 1344 | } |
| 1345 | |
| 1346 | zram->claim = true; |
| 1347 | mutex_unlock(&bdev->bd_mutex); |
| 1348 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1349 | /* |
| 1350 | * Remove sysfs first, so no one can perform a disksize |
| 1351 | * store while we destroy the device. This also helps during |
| 1352 | * hot_remove -- zram_reset_device() is the last holder of |
| 1353 | * ->init_lock, no later/concurrent disksize_store() or any |
| 1354 | * other sysfs handlers are possible. |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1355 | */ |
| 1356 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, |
| 1357 | &zram_disk_attr_group); |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1358 | |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1359 | /* Make sure all pending I/O is finished */ |
| 1360 | fsync_bdev(bdev); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1361 | zram_reset_device(zram); |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1362 | bdput(bdev); |
| 1363 | |
| 1364 | pr_info("Removed device: %s\n", zram->disk->disk_name); |
| 1365 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1366 | blk_cleanup_queue(zram->disk->queue); |
| 1367 | del_gendisk(zram->disk); |
| 1368 | put_disk(zram->disk); |
| 1369 | kfree(zram); |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1370 | return 0; |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1371 | } |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1372 | |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1373 | /* zram-control sysfs attributes */ |
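| | /* |
| | * Example: |
| | * cat /sys/class/zram-control/hot_add (prints the new device id) |
| | * echo 4 > /sys/class/zram-control/hot_remove (removes zram4) |
| | */ |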
| 1374 | static ssize_t hot_add_show(struct class *class, |
| 1375 | struct class_attribute *attr, |
| 1376 | char *buf) |
| 1377 | { |
| 1378 | int ret; |
| 1379 | |
| 1380 | mutex_lock(&zram_index_mutex); |
| 1381 | ret = zram_add(); |
| 1382 | mutex_unlock(&zram_index_mutex); |
| 1383 | |
| 1384 | if (ret < 0) |
| 1385 | return ret; |
| 1386 | return scnprintf(buf, PAGE_SIZE, "%d\n", ret); |
| 1387 | } |
| 1388 | |
| 1389 | static ssize_t hot_remove_store(struct class *class, |
| 1390 | struct class_attribute *attr, |
| 1391 | const char *buf, |
| 1392 | size_t count) |
| 1393 | { |
| 1394 | struct zram *zram; |
| 1395 | int ret, dev_id; |
| 1396 | |
| 1397 | /* dev_id is gendisk->first_minor, which is `int' */ |
| 1398 | ret = kstrtoint(buf, 10, &dev_id); |
| 1399 | if (ret) |
| 1400 | return ret; |
| 1401 | if (dev_id < 0) |
| 1402 | return -EINVAL; |
| 1403 | |
| 1404 | mutex_lock(&zram_index_mutex); |
| 1405 | |
| 1406 | zram = idr_find(&zram_index_idr, dev_id); |
Jerome Marchand | 17ec4cd | 2016-01-15 16:54:48 -0800 | [diff] [blame] | 1407 | if (zram) { |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1408 | ret = zram_remove(zram); |
Takashi Iwai | 529e71e | 2016-11-30 15:54:08 -0800 | [diff] [blame] | 1409 | if (!ret) |
| 1410 | idr_remove(&zram_index_idr, dev_id); |
Jerome Marchand | 17ec4cd | 2016-01-15 16:54:48 -0800 | [diff] [blame] | 1411 | } else { |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1412 | ret = -ENODEV; |
Jerome Marchand | 17ec4cd | 2016-01-15 16:54:48 -0800 | [diff] [blame] | 1413 | } |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1414 | |
| 1415 | mutex_unlock(&zram_index_mutex); |
| 1416 | return ret ? ret : count; |
| 1417 | } |
| 1418 | |
Sergey Senozhatsky | 5c7e9cc | 2016-12-07 14:44:31 -0800 | [diff] [blame] | 1419 | /* |
| 1420 | * NOTE: the hot_add attribute is not the usual read-only sysfs attribute, in |
| 1421 | * the sense that reading from this file does alter the state of your system -- |
| 1422 | * it creates a new un-initialized zram device and returns that device's |
| 1423 | * device_id (or an error code if it fails to create a new device). |
| 1424 | */ |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1425 | static struct class_attribute zram_control_class_attrs[] = { |
Sergey Senozhatsky | 5c7e9cc | 2016-12-07 14:44:31 -0800 | [diff] [blame] | 1426 | __ATTR(hot_add, 0400, hot_add_show, NULL), |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1427 | __ATTR_WO(hot_remove), |
| 1428 | __ATTR_NULL, |
| 1429 | }; |
| 1430 | |
| 1431 | static struct class zram_control_class = { |
| 1432 | .name = "zram-control", |
| 1433 | .owner = THIS_MODULE, |
| 1434 | .class_attrs = zram_control_class_attrs, |
| 1435 | }; |
| 1436 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1437 | static int zram_remove_cb(int id, void *ptr, void *data) |
| 1438 | { |
| 1439 | zram_remove(ptr); |
| 1440 | return 0; |
| 1441 | } |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1442 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1443 | static void destroy_devices(void) |
| 1444 | { |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1445 | class_unregister(&zram_control_class); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1446 | idr_for_each(&zram_index_idr, &zram_remove_cb, NULL); |
| 1447 | idr_destroy(&zram_index_idr); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1448 | unregister_blkdev(zram_major, "zram"); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1449 | } |
| 1450 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1451 | static int __init zram_init(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1452 | { |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1453 | int ret; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1454 | |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1455 | ret = class_register(&zram_control_class); |
| 1456 | if (ret) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1457 | pr_err("Unable to register zram-control class\n"); |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1458 | return ret; |
| 1459 | } |
| 1460 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1461 | zram_major = register_blkdev(0, "zram"); |
| 1462 | if (zram_major <= 0) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1463 | pr_err("Unable to get major number\n"); |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1464 | class_unregister(&zram_control_class); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1465 | return -EBUSY; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1466 | } |
| 1467 | |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1468 | while (num_devices != 0) { |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1469 | mutex_lock(&zram_index_mutex); |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1470 | ret = zram_add(); |
Sergey Senozhatsky | 6566d1a | 2015-06-25 15:00:24 -0700 | [diff] [blame] | 1471 | mutex_unlock(&zram_index_mutex); |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1472 | if (ret < 0) |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1473 | goto out_error; |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1474 | num_devices--; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1475 | } |
| 1476 | |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1477 | return 0; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1478 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1479 | out_error: |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1480 | destroy_devices(); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1481 | return ret; |
| 1482 | } |
| 1483 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1484 | static void __exit zram_exit(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1485 | { |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1486 | destroy_devices(); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1487 | } |
| 1488 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1489 | module_init(zram_init); |
| 1490 | module_exit(zram_exit); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1491 | |
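| | /* Example: pre-create four devices: modprobe zram num_devices=4 */ |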
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1492 | module_param(num_devices, uint, 0); |
Sergey Senozhatsky | c3cdb40 | 2015-06-25 15:00:11 -0700 | [diff] [blame] | 1493 | MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices"); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1494 | |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1495 | MODULE_LICENSE("Dual BSD/GPL"); |
| 1496 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1497 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |