/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

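/*
 * Each zram device is exposed as a gendisk; the sysfs attributes defined
 * below (disksize, initstate and the various statistics) hang off that
 * disk's device and use dev_to_zram() to recover the owning struct zram.
 */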
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->stats.pages_zero);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(zram->stats.pages_stored) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
			(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

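/*
 * Scan a page word by word; returns 1 only if every word is zero.
 * Such pages are never stored: the write path just sets ZRAM_ZERO.
 */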
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

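/*
 * Decompress the object stored at @index into the page-sized buffer @mem.
 * Unallocated or ZRAM_ZERO slots simply yield a cleared page; objects that
 * occupy a full PAGE_SIZE were stored uncompressed and are copied verbatim.
 */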
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

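/*
 * Write path: for a partial bvec the old page is first decompressed into a
 * bounce buffer and patched before recompression. Zero-filled pages are not
 * stored at all (only ZRAM_ZERO is set), and pages that compress to more
 * than max_zpage_size are kept uncompressed in a full PAGE_SIZE object.
 */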
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		zram_free_page(zram, index);

		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	/*
	 * zram_slot_free_notify could have missed this free,
	 * so double check.
	 */
	if (unlikely(meta->table[index].handle ||
			zram_test_flag(meta, index, ZRAM_ZERO)))
		zram_free_page(zram, index);

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

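/*
 * Drain the list of deferred slot-free requests that
 * zram_slot_free_notify() queued under slot_free_lock.
 */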
static void handle_pending_slot_free(struct zram *zram)
{
	struct zram_slot_free *free_rq;

	spin_lock(&zram->slot_free_lock);
	while (zram->slot_free_rq) {
		free_rq = zram->slot_free_rq;
		zram->slot_free_rq = free_rq->next;
		zram_free_page(zram, free_rq->index);
		kfree(free_rq);
	}
	spin_unlock(&zram->slot_free_lock);
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

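/*
 * Tear down an initialized device: free every stored object and drop the
 * metadata and statistics. The block device capacity is only reset when
 * @reset_capacity is true; module unload skips it because destroy_device()
 * has already released the gendisk.
 */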
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	flush_work(&zram->free_work);

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

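/*
 * Walk the bio segment by segment. A bio_vec that crosses a page boundary
 * is split in two so that zram_bvec_rw() only ever touches one zram page.
 */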
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw) < 0)
			goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

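/*
 * Slot frees coming from swap via zram_slot_free_notify() are deferred:
 * the request is queued with GFP_ATOMIC and processed later, either from
 * this work item or lazily by the next read/write in zram_bvec_rw().
 */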
static void zram_slot_free(struct work_struct *work)
{
	struct zram *zram;

	zram = container_of(work, struct zram, free_work);
	down_write(&zram->lock);
	handle_pending_slot_free(zram);
	up_write(&zram->lock);
}

static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
	spin_lock(&zram->slot_free_lock);
	free_rq->next = zram->slot_free_rq;
	zram->slot_free_rq = free_rq;
	spin_unlock(&zram->slot_free_lock);
}

static void zram_slot_free_notify(struct block_device *bdev,
				  unsigned long index)
{
	struct zram *zram;
	struct zram_slot_free *free_rq;

	zram = bdev->bd_disk->private_data;
	atomic64_inc(&zram->stats.notify_free);

	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
	if (!free_rq)
		return;

	free_rq->index = index;
	add_slot_free(zram, free_rq);
	schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	INIT_WORK(&zram->free_work, zram_slot_free);
	spin_lock_init(&zram->slot_free_lock);
	zram->slot_free_rq = NULL;

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
			num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");