/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);

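/*
 * Each slot in zram->table embeds a bit spinlock (the ZRAM_LOCK bit of
 * its flags word) that serializes access to that slot's handle, flags
 * and object size.
 */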
static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require the table entry's bit_spin_lock() to be held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

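/*
 * With 4K pages, a bio_vec always covers a whole zram page, so the
 * partial IO handling compiles away entirely.
 */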
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

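/*
 * Lock-free update of the max_used_pages watermark: retry the cmpxchg
 * until either the current maximum already covers @pages or @pages is
 * installed as the new maximum.
 */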
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

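/* Fill a page with a repeating word-sized pattern (memset for zero). */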
static inline void zram_fill_page(char *ptr, unsigned long len,
					unsigned long value)
{
	int i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}

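/*
 * Detect pages whose every word holds the same value. Such pages are
 * not compressed at all; only the repeated value is stored (ZRAM_SAME).
 */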
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;
	char mode_buf[8];
	ssize_t sz;

	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
	if (sz <= 0)
		return -EINVAL;

	/* ignore trailing new line */
	if (mode_buf[sz - 1] == '\n')
		mode_buf[sz - 1] = 0x00;

	if (strcmp(mode_buf, "all"))
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slots as ZRAM_IDLE, to close
		 * a race. See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
			zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}

	up_read(&zram->init_lock);

	return len;
}

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

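/* Detach the backing device and release the writeback block bitmap. */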
static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram->backing_dev) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

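/*
 * Attach a backing block device for writeback: open it exclusively,
 * switch it to PAGE_SIZE blocks and allocate a bitmap with one bit per
 * page-sized block to track writeback allocations.
 */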
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);
	gfp_t kmalloc_flags;

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0) {
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	kmalloc_flags = GFP_KERNEL | __GFP_ZERO;
	if (bitmap_sz > PAGE_SIZE)
		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;

	bitmap = kmalloc_node(bitmap_sz, kmalloc_flags, NUMA_NO_NODE);
	if (!bitmap && bitmap_sz > PAGE_SIZE)
		bitmap = vzalloc(bitmap_sz);

	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}

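/*
 * Reserve a free page-sized block on the backing device: scan the
 * bitmap for a zero bit and claim it with test_and_set_bit(),
 * retrying if another caller wins the race.
 */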
static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0 so a valid index is never confused with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio->bi_bdev = zram->bdev;
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}

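/* Writeback modes, selected by the string written to the sysfs node. */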
#define HUGE_WRITEBACK 1
#define IDLE_WRITEBACK 2

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index;
	struct bio bio;
	struct page *page;
	ssize_t ret, sz;
	char mode_buf[8];
	int mode = -1;
	unsigned long blk_idx = 0;

	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
	if (sz <= 0)
		return -EINVAL;

	/* ignore trailing newline */
	if (mode_buf[sz - 1] == '\n')
		mode_buf[sz - 1] = 0x00;

	if (!strcmp(mode_buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (!strcmp(mode_buf, "huge"))
		mode = HUGE_WRITEBACK;

	if (mode == -1)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (index = 0; index < nr_pages; index++) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode == IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode == HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty.
		 * IOW, zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to detect races with hugepage writeback */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio);

		bio.bi_max_vecs = 1;
		bio.bi_io_vec = &bvec;
		bio.bi_bdev = zram->bdev;

		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio_set_op_attrs(&bio, REQ_OP_WRITE, REQ_SYNC);
		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: A single page IO would be inefficient for write
		 * but it is not bad as a starter.
		 */
		ret = submit_bio_wait(&bio);
		if (ret) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock, so we need to check whether
		 * the slot has changed. If the slot was freed, we can catch
		 * that easily via zram_allocated. A subtle case is when the
		 * slot is freed/reallocated/marked as ZRAM_IDLE again. To
		 * close that race, idle_store doesn't mark a slot as
		 * ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB. Thus,
		 * we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	ret = len;
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &bvec, entry, bio);
}

/*
 * The block layer wants only one ->make_request_fn to be active at a
 * time, so chaining this IO to the parent IO in the same context would
 * deadlock. To avoid that, the read is issued from a worker thread.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}

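/*
 * Dump per-slot state: one line per allocated slot with the last
 * access time and the s(ame), w(ritten back), h(uge) and i(dle) flags.
 */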
static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	gfp_t kmalloc_flags;

	kmalloc_flags = GFP_KERNEL;
	if (count > PAGE_SIZE)
		kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY;

	kbuf = kmalloc_node(count, kmalloc_flags, NUMA_NO_NODE);
	if (!kbuf && count > PAGE_SIZE)
		kbuf = vmalloc(count);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count < copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted,
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1158 | static void zram_meta_free(struct zram *zram, u64 disksize) |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1159 | { |
Ganesh Mahendran | 1fec117 | 2015-02-12 15:00:33 -0800 | [diff] [blame] | 1160 | size_t num_pages = disksize >> PAGE_SHIFT; |
| 1161 | size_t index; |
| 1162 | |
| 1163 | /* Free all pages that are still in this zram device */ |
Minchan Kim | ffa3b81 | 2017-05-03 14:55:53 -0700 | [diff] [blame] | 1164 | for (index = 0; index < num_pages; index++) |
| 1165 | zram_free_page(zram, index); |
Ganesh Mahendran | 1fec117 | 2015-02-12 15:00:33 -0800 | [diff] [blame] | 1166 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1167 | zs_destroy_pool(zram->mem_pool); |
| 1168 | vfree(zram->table); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1169 | } |
| 1170 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1171 | static bool zram_meta_alloc(struct zram *zram, u64 disksize) |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1172 | { |
| 1173 | size_t num_pages; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1174 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1175 | num_pages = disksize >> PAGE_SHIFT; |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1176 | zram->table = vzalloc(num_pages * sizeof(*zram->table)); |
| 1177 | if (!zram->table) |
| 1178 | return false; |
| 1179 | |
| 1180 | zram->mem_pool = zs_create_pool(zram->disk->disk_name); |
| 1181 | if (!zram->mem_pool) { |
| 1182 | vfree(zram->table); |
| 1183 | return false; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1184 | } |
| 1185 | |
Sergey Senozhatsky | 797b0857 | 2018-04-05 16:24:47 -0700 | [diff] [blame] | 1186 | if (!huge_class_size) |
| 1187 | huge_class_size = zs_huge_class_size(zram->mem_pool); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1188 | return true; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1189 | } |
| 1190 | |
Weijie Yang | d2d5e76 | 2014-08-06 16:08:31 -0700 | [diff] [blame] | 1191 | /* |
 | 1192 |  * To protect concurrent access to the same index entry, the
 | 1193 |  * caller should hold this table index entry's bit_spinlock to
 | 1194 |  * indicate that this index entry is being accessed.
| 1195 | */ |
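/*
 * A minimal sketch of the expected caller pattern (zram_bio_discard()
 * below is a real example):
 *
 *	zram_slot_lock(zram, index);
 *	zram_free_page(zram, index);
 *	zram_slot_unlock(zram, index);
 */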
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1196 | static void zram_free_page(struct zram *zram, size_t index) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1197 | { |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1198 | unsigned long handle; |
| 1199 | |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1200 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
Srinivas Paladugu | 5aa04bc | 2019-02-01 16:57:03 -0800 | [diff] [blame] | 1201 | zram->table[index].ac_time.tv64 = 0; |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1202 | #endif |
Minchan Kim | 149be47 | 2018-12-28 00:36:44 -0800 | [diff] [blame] | 1203 | if (zram_test_flag(zram, index, ZRAM_IDLE)) |
| 1204 | zram_clear_flag(zram, index, ZRAM_IDLE); |
| 1205 | |
Minchan Kim | f185f71 | 2018-06-07 17:05:42 -0700 | [diff] [blame] | 1206 | if (zram_test_flag(zram, index, ZRAM_HUGE)) { |
| 1207 | zram_clear_flag(zram, index, ZRAM_HUGE); |
| 1208 | atomic64_dec(&zram->stats.huge_pages); |
| 1209 | } |
| 1210 | |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1211 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
| 1212 | zram_clear_flag(zram, index, ZRAM_WB); |
| 1213 | free_block_bdev(zram, zram_get_element(zram, index)); |
| 1214 | goto out; |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1215 | } |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1216 | |
zhouxianrong | 74ccaa7 | 2017-02-24 14:59:27 -0800 | [diff] [blame] | 1217 | /* |
 | 1218 |  * No memory is allocated for same-element-filled pages.
 | 1219 |  * Simply clear the same-page flag.
| 1220 | */ |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1221 | if (zram_test_flag(zram, index, ZRAM_SAME)) { |
| 1222 | zram_clear_flag(zram, index, ZRAM_SAME); |
zhouxianrong | 74ccaa7 | 2017-02-24 14:59:27 -0800 | [diff] [blame] | 1223 | atomic64_dec(&zram->stats.same_pages); |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1224 | goto out; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1225 | } |
| 1226 | |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1227 | handle = zram_get_handle(zram, index); |
zhouxianrong | 74ccaa7 | 2017-02-24 14:59:27 -0800 | [diff] [blame] | 1228 | if (!handle) |
| 1229 | return; |
| 1230 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1231 | zs_free(zram->mem_pool, handle); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1232 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1233 | atomic64_sub(zram_get_obj_size(zram, index), |
Weijie Yang | d2d5e76 | 2014-08-06 16:08:31 -0700 | [diff] [blame] | 1234 | &zram->stats.compr_data_size); |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1235 | out: |
Sergey Senozhatsky | 90a7806 | 2014-04-07 15:38:03 -0700 | [diff] [blame] | 1236 | atomic64_dec(&zram->stats.pages_stored); |
Minchan Kim | a890b0b | 2017-05-03 14:55:50 -0700 | [diff] [blame] | 1237 | zram_set_handle(zram, index, 0); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1238 | zram_set_obj_size(zram, index, 0); |
Minchan Kim | 86d820b | 2018-12-28 00:36:47 -0800 | [diff] [blame] | 1239 | WARN_ON_ONCE(zram->table[index].flags & |
| 1240 | ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1241 | } |
| 1242 | |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1243 | static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, |
| 1244 | struct bio *bio, bool partial_io) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1245 | { |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1246 | int ret; |
Minchan Kim | 9296747 | 2014-01-30 15:46:03 -0800 | [diff] [blame] | 1247 | unsigned long handle; |
Sergey Senozhatsky | ebaf9ab | 2016-07-26 15:22:45 -0700 | [diff] [blame] | 1248 | unsigned int size; |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1249 | void *src, *dst; |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1250 | |
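	/*
	 * If the slot was written back (ZRAM_WB), the data lives on the
	 * backing device rather than in the zsmalloc pool; drop the slot
	 * lock and read it back through a bio.
	 */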
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1251 | zram_slot_lock(zram, index); |
| 1252 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
| 1253 | struct bio_vec bvec; |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1254 | |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1255 | zram_slot_unlock(zram, index); |
Minchan Kim | 75f69c2 | 2018-12-28 00:36:40 -0800 | [diff] [blame] | 1256 | |
| 1257 | bvec.bv_page = page; |
| 1258 | bvec.bv_len = PAGE_SIZE; |
| 1259 | bvec.bv_offset = 0; |
| 1260 | return read_from_bdev(zram, &bvec, |
| 1261 | zram_get_element(zram, index), |
| 1262 | bio, partial_io); |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1263 | } |
| 1264 | |
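	/*
	 * Same-filled pages store no compressed data; the fill pattern
	 * lives directly in the table entry's element field, so the read
	 * is just a refill of the target page with that pattern.
	 */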
Minchan Kim | a890b0b | 2017-05-03 14:55:50 -0700 | [diff] [blame] | 1265 | handle = zram_get_handle(zram, index); |
Minchan Kim | 856f534 | 2017-10-03 16:15:19 -0700 | [diff] [blame] | 1266 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { |
| 1267 | unsigned long value; |
| 1268 | void *mem; |
| 1269 | |
| 1270 | value = handle ? zram_get_element(zram, index) : 0; |
| 1271 | mem = kmap_atomic(page); |
| 1272 | zram_fill_page(mem, PAGE_SIZE, value); |
| 1273 | kunmap_atomic(mem); |
| 1274 | zram_slot_unlock(zram, index); |
| 1275 | return 0; |
| 1276 | } |
| 1277 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1278 | size = zram_get_obj_size(zram, index); |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1279 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1280 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
Sergey Senozhatsky | ebaf9ab | 2016-07-26 15:22:45 -0700 | [diff] [blame] | 1281 | if (size == PAGE_SIZE) { |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1282 | dst = kmap_atomic(page); |
| 1283 | memcpy(dst, src, PAGE_SIZE); |
| 1284 | kunmap_atomic(dst); |
| 1285 | ret = 0; |
Sergey Senozhatsky | ebaf9ab | 2016-07-26 15:22:45 -0700 | [diff] [blame] | 1286 | } else { |
| 1287 | struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); |
| 1288 | |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1289 | dst = kmap_atomic(page); |
| 1290 | ret = zcomp_decompress(zstrm, src, size, dst); |
| 1291 | kunmap_atomic(dst); |
Sergey Senozhatsky | ebaf9ab | 2016-07-26 15:22:45 -0700 | [diff] [blame] | 1292 | zcomp_stream_put(zram->comp); |
| 1293 | } |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1294 | zs_unmap_object(zram->mem_pool, handle); |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1295 | zram_slot_unlock(zram, index); |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1296 | |
| 1297 | /* Should NEVER happen. Return bio error if it does. */ |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1298 | if (unlikely(ret)) |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1299 | pr_err("Decompression failed! err=%d, page=%u\n", ret, index); |
Sergey Senozhatsky | 37b51fd | 2012-10-30 22:40:23 +0300 | [diff] [blame] | 1300 | |
Sergey Senozhatsky | 37b51fd | 2012-10-30 22:40:23 +0300 | [diff] [blame] | 1301 | return ret; |
| 1302 | } |
| 1303 | |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1304 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1305 | u32 index, int offset, struct bio *bio) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1306 | { |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1307 | int ret; |
Minchan Kim | 130f315 | 2012-06-08 15:39:27 +0900 | [diff] [blame] | 1308 | struct page *page; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1309 | |
| 1310 | page = bvec->bv_page; |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1311 | if (is_partial_io(bvec)) { |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1312 | /* Use a temporary buffer to decompress the page */ |
| 1313 | page = alloc_page(GFP_NOIO|__GFP_HIGHMEM); |
| 1314 | if (!page) |
| 1315 | return -ENOMEM; |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1316 | } |
| 1317 | |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1318 | ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1319 | if (unlikely(ret)) |
| 1320 | goto out; |
| 1321 | |
| 1322 | if (is_partial_io(bvec)) { |
| 1323 | void *dst = kmap_atomic(bvec->bv_page); |
| 1324 | void *src = kmap_atomic(page); |
| 1325 | |
| 1326 | memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len); |
| 1327 | kunmap_atomic(src); |
| 1328 | kunmap_atomic(dst); |
| 1329 | } |
| 1330 | out: |
| 1331 | if (is_partial_io(bvec)) |
| 1332 | __free_page(page); |
| 1333 | |
| 1334 | return ret; |
| 1335 | } |
| 1336 | |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1337 | static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, |
| 1338 | u32 index, struct bio *bio) |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1339 | { |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1340 | int ret = 0; |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1341 | unsigned long alloced_pages; |
| 1342 | unsigned long handle = 0; |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1343 | unsigned int comp_len = 0; |
| 1344 | void *src, *dst, *mem; |
| 1345 | struct zcomp_strm *zstrm; |
| 1346 | struct page *page = bvec->bv_page; |
| 1347 | unsigned long element = 0; |
| 1348 | enum zram_pageflags flags = 0; |
| 1349 | |
| 1350 | mem = kmap_atomic(page); |
| 1351 | if (page_same_filled(mem, &element)) { |
| 1352 | kunmap_atomic(mem); |
| 1353 | /* Free memory associated with this sector now. */ |
| 1354 | flags = ZRAM_SAME; |
| 1355 | atomic64_inc(&zram->stats.same_pages); |
| 1356 | goto out; |
| 1357 | } |
| 1358 | kunmap_atomic(mem); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1359 | |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1360 | compress_again: |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1361 | zstrm = zcomp_stream_get(zram->comp); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1362 | src = kmap_atomic(page); |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1363 | ret = zcomp_compress(zstrm, src, &comp_len); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1364 | kunmap_atomic(src); |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1365 | |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1366 | if (unlikely(ret)) { |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1367 | zcomp_stream_put(zram->comp); |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1368 | pr_err("Compression failed! err=%d\n", ret); |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1369 | zs_free(zram->mem_pool, handle); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1370 | return ret; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1371 | } |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1372 | |
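	/*
	 * Pages that don't compress below huge_class_size are stored
	 * uncompressed (comp_len forced to PAGE_SIZE) and are flagged
	 * ZRAM_HUGE once the slot is updated below.
	 */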
Minchan Kim | 86d820b | 2018-12-28 00:36:47 -0800 | [diff] [blame] | 1373 | if (comp_len >= huge_class_size) |
Peter Kalauskas | 109a48e | 2018-11-08 11:16:03 -0800 | [diff] [blame] | 1374 | comp_len = PAGE_SIZE; |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1375 | /* |
| 1376 | * handle allocation has 2 paths: |
| 1377 | * a) fast path is executed with preemption disabled (for |
| 1378 | * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, |
| 1379 | * since we can't sleep; |
| 1380 | * b) slow path enables preemption and attempts to allocate |
 | 1381 |  * the page with __GFP_DIRECT_RECLAIM bit set. We have to
 | 1382 |  * put the per-cpu compression stream and, thus, re-do
 | 1383 |  * the compression once the handle is allocated.
 | 1384 |  *
 | 1385 |  * If we have a 'non-null' handle here then we are coming
 | 1386 |  * from the slow path and the handle has already been allocated.
| 1387 | */ |
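	/*
	 * Note the gfp split below: the fast-path zs_malloc() passes
	 * __GFP_KSWAPD_RECLAIM (wake kswapd, no direct reclaim), while
	 * the slow-path retry uses GFP_NOIO and may sleep.
	 */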
| 1388 | if (!handle) |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1389 | handle = zs_malloc(zram->mem_pool, comp_len, |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1390 | __GFP_KSWAPD_RECLAIM | |
| 1391 | __GFP_NOWARN | |
Minchan Kim | 9bc482d | 2016-07-26 15:23:34 -0700 | [diff] [blame] | 1392 | __GFP_HIGHMEM | |
| 1393 | __GFP_MOVABLE); |
Nitin Gupta | fd1a30d | 2012-01-09 16:51:59 -0600 | [diff] [blame] | 1394 | if (!handle) { |
Sergey Senozhatsky | 2aea849 | 2016-07-26 15:22:42 -0700 | [diff] [blame] | 1395 | zcomp_stream_put(zram->comp); |
Sergey Senozhatsky | 623e47f | 2016-05-20 17:00:02 -0700 | [diff] [blame] | 1396 | atomic64_inc(&zram->stats.writestall); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1397 | handle = zs_malloc(zram->mem_pool, comp_len, |
Minchan Kim | 9bc482d | 2016-07-26 15:23:34 -0700 | [diff] [blame] | 1398 | GFP_NOIO | __GFP_HIGHMEM | |
| 1399 | __GFP_MOVABLE); |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1400 | if (handle) |
| 1401 | goto compress_again; |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1402 | return -ENOMEM; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1403 | } |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1404 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1405 | alloced_pages = zs_get_total_pages(zram->mem_pool); |
Sergey SENOZHATSKY | 1237275 | 2015-11-06 16:29:04 -0800 | [diff] [blame] | 1406 | update_used_max(zram, alloced_pages); |
| 1407 | |
Minchan Kim | 461a8ee | 2014-10-09 15:29:55 -0700 | [diff] [blame] | 1408 | if (zram->limit_pages && alloced_pages > zram->limit_pages) { |
Minchan Kim | 56e03b0 | 2017-09-06 16:19:47 -0700 | [diff] [blame] | 1409 | zcomp_stream_put(zram->comp); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1410 | zs_free(zram->mem_pool, handle); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1411 | return -ENOMEM; |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1412 | } |
| 1413 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1414 | dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1415 | |
| 1416 | src = zstrm->buffer; |
| 1417 | if (comp_len == PAGE_SIZE) |
Nitin Gupta | 397c606 | 2013-01-02 08:53:41 -0800 | [diff] [blame] | 1418 | src = kmap_atomic(page); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1419 | memcpy(dst, src, comp_len); |
| 1420 | if (comp_len == PAGE_SIZE) |
Nitin Gupta | 397c606 | 2013-01-02 08:53:41 -0800 | [diff] [blame] | 1421 | kunmap_atomic(src); |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1422 | |
Sergey Senozhatsky | 2aea849 | 2016-07-26 15:22:42 -0700 | [diff] [blame] | 1423 | zcomp_stream_put(zram->comp); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1424 | zs_unmap_object(zram->mem_pool, handle); |
Minchan Kim | 2b3d866 | 2017-09-06 16:19:44 -0700 | [diff] [blame] | 1425 | atomic64_add(comp_len, &zram->stats.compr_data_size); |
| 1426 | out: |
Sunghan Suh | f40ac2a | 2013-07-03 20:10:05 +0900 | [diff] [blame] | 1427 | /* |
| 1428 | * Free memory associated with this sector |
| 1429 | * before overwriting unused sectors. |
| 1430 | */ |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1431 | zram_slot_lock(zram, index); |
Sunghan Suh | f40ac2a | 2013-07-03 20:10:05 +0900 | [diff] [blame] | 1432 | zram_free_page(zram, index); |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1433 | |
Minchan Kim | f185f71 | 2018-06-07 17:05:42 -0700 | [diff] [blame] | 1434 | if (comp_len == PAGE_SIZE) { |
| 1435 | zram_set_flag(zram, index, ZRAM_HUGE); |
| 1436 | atomic64_inc(&zram->stats.huge_pages); |
| 1437 | } |
| 1438 | |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1439 | if (flags) { |
| 1440 | zram_set_flag(zram, index, flags); |
Minchan Kim | 2b3d866 | 2017-09-06 16:19:44 -0700 | [diff] [blame] | 1441 | zram_set_element(zram, index, element); |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1442 | } else { |
Minchan Kim | 2b3d866 | 2017-09-06 16:19:44 -0700 | [diff] [blame] | 1443 | zram_set_handle(zram, index, handle); |
| 1444 | zram_set_obj_size(zram, index, comp_len); |
| 1445 | } |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1446 | zram_slot_unlock(zram, index); |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1447 | |
| 1448 | /* Update stats */ |
Sergey Senozhatsky | 90a7806 | 2014-04-07 15:38:03 -0700 | [diff] [blame] | 1449 | atomic64_inc(&zram->stats.pages_stored); |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1450 | return ret; |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1451 | } |
| 1452 | |
| 1453 | static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1454 | u32 index, int offset, struct bio *bio) |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1455 | { |
| 1456 | int ret; |
| 1457 | struct page *page = NULL; |
| 1458 | void *src; |
| 1459 | struct bio_vec vec; |
| 1460 | |
| 1461 | vec = *bvec; |
| 1462 | if (is_partial_io(bvec)) { |
| 1463 | void *dst; |
| 1464 | /* |
| 1465 | * This is a partial IO. We need to read the full page |
 | 1466 |  * before writing the changes.
| 1467 | */ |
| 1468 | page = alloc_page(GFP_NOIO|__GFP_HIGHMEM); |
| 1469 | if (!page) |
| 1470 | return -ENOMEM; |
| 1471 | |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1472 | ret = __zram_bvec_read(zram, page, index, bio, true); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1473 | if (ret) |
| 1474 | goto out; |
| 1475 | |
| 1476 | src = kmap_atomic(bvec->bv_page); |
| 1477 | dst = kmap_atomic(page); |
| 1478 | memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len); |
| 1479 | kunmap_atomic(dst); |
| 1480 | kunmap_atomic(src); |
| 1481 | |
| 1482 | vec.bv_page = page; |
| 1483 | vec.bv_len = PAGE_SIZE; |
| 1484 | vec.bv_offset = 0; |
| 1485 | } |
| 1486 | |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1487 | ret = __zram_bvec_write(zram, &vec, index, bio); |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1488 | out: |
Nitin Gupta | 397c606 | 2013-01-02 08:53:41 -0800 | [diff] [blame] | 1489 | if (is_partial_io(bvec)) |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1490 | __free_page(page); |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1491 | return ret; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1492 | } |
| 1493 | |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1494 | /* |
| 1495 | * zram_bio_discard - handler on discard request |
| 1496 | * @index: physical block index in PAGE_SIZE units |
| 1497 | * @offset: byte offset within physical block |
| 1498 | */ |
| 1499 | static void zram_bio_discard(struct zram *zram, u32 index, |
| 1500 | int offset, struct bio *bio) |
| 1501 | { |
| 1502 | size_t n = bio->bi_iter.bi_size; |
| 1503 | |
| 1504 | /* |
 | 1505 |  * zram manages data in physical block size units. Because the logical
 | 1506 |  * block size isn't identical to the physical block size on some
 | 1507 |  * architectures, we could get a discard request pointing to a specific
 | 1508 |  * offset within a certain physical block. Although we could handle such
 | 1509 |  * a request by reading that physical block, decompressing, partially
 | 1510 |  * zeroing, re-compressing and then re-storing it, this isn't reasonable
 | 1511 |  * because our intent with a discard request is to save memory. So
 | 1512 |  * skipping this logical block is appropriate here.
| 1513 | */ |
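	/*
	 * Worked example (PAGE_SIZE == 4096): a discard of bi_size == 9216
	 * bytes at offset == 2048 frees only the one fully-covered page;
	 * the partially-covered head (2048 bytes) and tail (3072 bytes)
	 * pages are left intact.
	 */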
| 1514 | if (offset) { |
Weijie Yang | 38515c7 | 2014-06-04 16:11:06 -0700 | [diff] [blame] | 1515 | if (n <= (PAGE_SIZE - offset)) |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1516 | return; |
| 1517 | |
Weijie Yang | 38515c7 | 2014-06-04 16:11:06 -0700 | [diff] [blame] | 1518 | n -= (PAGE_SIZE - offset); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1519 | index++; |
| 1520 | } |
| 1521 | |
| 1522 | while (n >= PAGE_SIZE) { |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1523 | zram_slot_lock(zram, index); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1524 | zram_free_page(zram, index); |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1525 | zram_slot_unlock(zram, index); |
Sergey Senozhatsky | 015254d | 2014-10-09 15:29:57 -0700 | [diff] [blame] | 1526 | atomic64_inc(&zram->stats.notify_free); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1527 | index++; |
| 1528 | n -= PAGE_SIZE; |
| 1529 | } |
| 1530 | } |
| 1531 | |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1532 | /* |
 | 1533 |  * Returns a negative errno on failure. Otherwise returns 0 or 1:
 | 1534 |  * 0 if the IO request was completed synchronously,
 | 1535 |  * 1 if the IO request was successfully submitted (asynchronously).
| 1536 | */ |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1537 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1538 | int offset, bool is_write, struct bio *bio) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1539 | { |
| 1540 | unsigned long start_time = jiffies; |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1541 | int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1542 | int ret; |
| 1543 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1544 | generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT, |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1545 | &zram->disk->part0); |
| 1546 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1547 | if (!is_write) { |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1548 | atomic64_inc(&zram->stats.num_reads); |
Minchan Kim | 0a6c199 | 2017-09-06 16:20:07 -0700 | [diff] [blame] | 1549 | ret = zram_bvec_read(zram, bvec, index, offset, bio); |
Minchan Kim | f61c539 | 2017-05-03 14:55:41 -0700 | [diff] [blame] | 1550 | flush_dcache_page(bvec->bv_page); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1551 | } else { |
| 1552 | atomic64_inc(&zram->stats.num_writes); |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1553 | ret = zram_bvec_write(zram, bvec, index, offset, bio); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1554 | } |
| 1555 | |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1556 | generic_end_io_acct(rw_acct, &zram->disk->part0, start_time); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1557 | |
Minchan Kim | 754f94b | 2018-06-07 17:05:45 -0700 | [diff] [blame] | 1558 | zram_slot_lock(zram, index); |
| 1559 | zram_accessed(zram, index); |
| 1560 | zram_slot_unlock(zram, index); |
| 1561 | |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1562 | if (unlikely(ret < 0)) { |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1563 | if (!is_write) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1564 | atomic64_inc(&zram->stats.failed_reads); |
| 1565 | else |
| 1566 | atomic64_inc(&zram->stats.failed_writes); |
| 1567 | } |
| 1568 | |
| 1569 | return ret; |
| 1570 | } |
| 1571 | |
| 1572 | static void __zram_make_request(struct zram *zram, struct bio *bio) |
| 1573 | { |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 1574 | int offset; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1575 | u32 index; |
| 1576 | struct bio_vec bvec; |
| 1577 | struct bvec_iter iter; |
| 1578 | |
| 1579 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
| 1580 | offset = (bio->bi_iter.bi_sector & |
| 1581 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
| 1582 | |
Mike Christie | 95fe6c1 | 2016-06-05 14:31:48 -0500 | [diff] [blame] | 1583 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1584 | zram_bio_discard(zram, index, offset, bio); |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1585 | bio_endio(bio); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1586 | return; |
| 1587 | } |
| 1588 | |
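	/*
	 * A bio_vec may straddle a page boundary of the zram device, so
	 * split it into page-bounded chunks. For example, a 4096-byte bvec
	 * starting at sector 1 (offset 512) is handled as a 3584-byte chunk
	 * in one page followed by a 512-byte chunk in the next.
	 */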
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1589 | bio_for_each_segment(bvec, bio, iter) { |
Minchan Kim | e7df4ff | 2017-05-03 14:55:38 -0700 | [diff] [blame] | 1590 | struct bio_vec bv = bvec; |
| 1591 | unsigned int unwritten = bvec.bv_len; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1592 | |
Minchan Kim | e7df4ff | 2017-05-03 14:55:38 -0700 | [diff] [blame] | 1593 | do { |
| 1594 | bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, |
| 1595 | unwritten); |
Mike Christie | abf5454 | 2016-08-04 14:23:34 -0600 | [diff] [blame] | 1596 | if (zram_bvec_rw(zram, &bv, index, offset, |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1597 | op_is_write(bio_op(bio)), bio) < 0) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1598 | goto out; |
| 1599 | |
Minchan Kim | e7df4ff | 2017-05-03 14:55:38 -0700 | [diff] [blame] | 1600 | bv.bv_offset += bv.bv_len; |
| 1601 | unwritten -= bv.bv_len; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1602 | |
Minchan Kim | e7df4ff | 2017-05-03 14:55:38 -0700 | [diff] [blame] | 1603 | update_position(&index, &offset, &bv); |
| 1604 | } while (unwritten); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1605 | } |
| 1606 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1607 | bio_endio(bio); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1608 | return; |
| 1609 | |
| 1610 | out: |
| 1611 | bio_io_error(bio); |
| 1612 | } |
| 1613 | |
| 1614 | /* |
| 1615 | * Handler function for all zram I/O requests. |
| 1616 | */ |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 1617 | static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1618 | { |
| 1619 | struct zram *zram = queue->queuedata; |
| 1620 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1621 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
| 1622 | bio->bi_iter.bi_size)) { |
| 1623 | atomic64_inc(&zram->stats.invalid_io); |
Minchan Kim | a73779c | 2017-02-24 14:56:47 -0800 | [diff] [blame] | 1624 | goto error; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1625 | } |
| 1626 | |
| 1627 | __zram_make_request(zram, bio); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 1628 | return BLK_QC_T_NONE; |
Minchan Kim | a73779c | 2017-02-24 14:56:47 -0800 | [diff] [blame] | 1629 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1630 | error: |
| 1631 | bio_io_error(bio); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 1632 | return BLK_QC_T_NONE; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1633 | } |
| 1634 | |
| 1635 | static void zram_slot_free_notify(struct block_device *bdev, |
| 1636 | unsigned long index) |
| 1637 | { |
| 1638 | struct zram *zram; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1639 | |
| 1640 | zram = bdev->bd_disk->private_data; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1641 | |
Minchan Kim | 97cebf9 | 2018-12-28 00:36:33 -0800 | [diff] [blame] | 1642 | atomic64_inc(&zram->stats.notify_free); |
| 1643 | if (!zram_slot_trylock(zram, index)) { |
| 1644 | atomic64_inc(&zram->stats.miss_free); |
| 1645 | return; |
| 1646 | } |
| 1647 | |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1648 | zram_free_page(zram, index); |
Minchan Kim | 425db41 | 2017-05-03 14:55:44 -0700 | [diff] [blame] | 1649 | zram_slot_unlock(zram, index); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1650 | } |
| 1651 | |
| 1652 | static int zram_rw_page(struct block_device *bdev, sector_t sector, |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1653 | struct page *page, bool is_write) |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1654 | { |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1655 | int offset, ret; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1656 | u32 index; |
| 1657 | struct zram *zram; |
| 1658 | struct bio_vec bv; |
| 1659 | |
| 1660 | zram = bdev->bd_disk->private_data; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1661 | |
| 1662 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { |
| 1663 | atomic64_inc(&zram->stats.invalid_io); |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1664 | ret = -EINVAL; |
Minchan Kim | a73779c | 2017-02-24 14:56:47 -0800 | [diff] [blame] | 1665 | goto out; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1666 | } |
| 1667 | |
| 1668 | index = sector >> SECTORS_PER_PAGE_SHIFT; |
Minchan Kim | 7d53d47 | 2017-04-13 14:56:35 -0700 | [diff] [blame] | 1669 | offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1670 | |
| 1671 | bv.bv_page = page; |
| 1672 | bv.bv_len = PAGE_SIZE; |
| 1673 | bv.bv_offset = 0; |
| 1674 | |
Minchan Kim | 598d053 | 2017-09-06 16:20:03 -0700 | [diff] [blame] | 1675 | ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL); |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1676 | out: |
| 1677 | /* |
 | 1678 |  * If I/O fails, just return the error (i.e., non-zero) without
 | 1679 |  * calling page_endio.
 | 1680 |  * This makes the callers of rw_page (e.g., swap_readpage,
 | 1681 |  * __swap_writepage) resubmit the I/O as a bio request, and
 | 1682 |  * bio->bi_end_io then handles the error
 | 1683 |  * (e.g., SetPageError, set_page_dirty and extra work).
| 1684 | */ |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1685 | if (unlikely(ret < 0)) |
| 1686 | return ret; |
| 1687 | |
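	/*
	 * ret == 0 means the request completed synchronously, so finish
	 * the page here; ret == 1 means a bio was submitted (writeback
	 * path) and its end_io callback is expected to complete the page,
	 * so just report success.
	 */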
| 1688 | switch (ret) { |
| 1689 | case 0: |
Jens Axboe | c11f0c0 | 2016-08-05 08:11:04 -0600 | [diff] [blame] | 1690 | page_endio(page, is_write, 0); |
Minchan Kim | b53858b | 2017-09-06 16:20:00 -0700 | [diff] [blame] | 1691 | break; |
| 1692 | case 1: |
| 1693 | ret = 0; |
| 1694 | break; |
| 1695 | default: |
| 1696 | WARN_ON(1); |
| 1697 | } |
| 1698 | return ret; |
Sergey Senozhatsky | 522698d | 2015-06-25 15:00:08 -0700 | [diff] [blame] | 1699 | } |
| 1700 | |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1701 | static void zram_reset_device(struct zram *zram) |
Jerome Marchand | 924bd88 | 2011-06-10 15:28:48 +0200 | [diff] [blame] | 1702 | { |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1703 | struct zcomp *comp; |
| 1704 | u64 disksize; |
| 1705 | |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1706 | down_write(&zram->init_lock); |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1707 | |
| 1708 | zram->limit_pages = 0; |
| 1709 | |
Sergey Senozhatsky | be2d1d5 | 2014-04-07 15:38:00 -0700 | [diff] [blame] | 1710 | if (!init_done(zram)) { |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1711 | up_write(&zram->init_lock); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1712 | return; |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1713 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1714 | |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1715 | comp = zram->comp; |
| 1716 | disksize = zram->disksize; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1717 | zram->disksize = 0; |
Weijie Yang | d7ad41a | 2015-06-10 11:14:49 -0700 | [diff] [blame] | 1718 | |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1719 | set_capacity(zram->disk, 0); |
Weijie Yang | d7ad41a | 2015-06-10 11:14:49 -0700 | [diff] [blame] | 1720 | part_stat_set_all(&zram->disk->part0, 0); |
Sergey Senozhatsky | a096caf | 2015-02-12 15:00:39 -0800 | [diff] [blame] | 1721 | |
Sergey Senozhatsky | 644d478 | 2013-06-26 15:28:39 +0300 | [diff] [blame] | 1722 | up_write(&zram->init_lock); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1723 | 	/* I/O operations on all CPUs are done, so free the resources */
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1724 | zram_meta_free(zram, disksize); |
Minchan Kim | ffa3b81 | 2017-05-03 14:55:53 -0700 | [diff] [blame] | 1725 | memset(&zram->stats, 0, sizeof(zram->stats)); |
Minchan Kim | 08eee69 | 2015-02-12 15:00:45 -0800 | [diff] [blame] | 1726 | zcomp_destroy(comp); |
Minchan Kim | 9ac886a | 2017-09-06 16:19:54 -0700 | [diff] [blame] | 1727 | reset_bdev(zram); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1728 | } |
| 1729 | |
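/*
 * The disksize value is parsed with memparse(), so suffixes such as
 * K/M/G work. Typical setup from userspace:
 *
 *	echo 1G > /sys/block/zram0/disksize
 */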
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1730 | static ssize_t disksize_store(struct device *dev, |
| 1731 | struct device_attribute *attr, const char *buf, size_t len) |
| 1732 | { |
| 1733 | u64 disksize; |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1734 | struct zcomp *comp; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1735 | struct zram *zram = dev_to_zram(dev); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1736 | int err; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1737 | |
| 1738 | disksize = memparse(buf, NULL); |
| 1739 | if (!disksize) |
| 1740 | return -EINVAL; |
| 1741 | |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1742 | down_write(&zram->init_lock); |
| 1743 | if (init_done(zram)) { |
| 1744 | pr_info("Cannot change disksize for initialized device\n"); |
| 1745 | err = -EBUSY; |
| 1746 | goto out_unlock; |
| 1747 | } |
| 1748 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1749 | disksize = PAGE_ALIGN(disksize); |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1750 | if (!zram_meta_alloc(zram, disksize)) { |
| 1751 | err = -ENOMEM; |
| 1752 | goto out_unlock; |
| 1753 | } |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1754 | |
Sergey Senozhatsky | da9556a | 2016-05-20 16:59:51 -0700 | [diff] [blame] | 1755 | comp = zcomp_create(zram->compressor); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1756 | if (IS_ERR(comp)) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1757 | pr_err("Cannot initialise %s compressing backend\n", |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1758 | zram->compressor); |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1759 | err = PTR_ERR(comp); |
| 1760 | goto out_free_meta; |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1761 | } |
| 1762 | |
Sergey Senozhatsky | d61f98c | 2014-04-07 15:38:19 -0700 | [diff] [blame] | 1763 | zram->comp = comp; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1764 | zram->disksize = disksize; |
| 1765 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); |
Minchan Kim | 34a17b1 | 2017-11-15 17:32:56 -0800 | [diff] [blame] | 1766 | |
| 1767 | revalidate_disk(zram->disk); |
Minchan Kim | ad4764b | 2017-01-10 16:58:18 -0800 | [diff] [blame] | 1768 | up_write(&zram->init_lock); |
Minchan Kim | b4c5c60 | 2014-07-23 14:00:04 -0700 | [diff] [blame] | 1769 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1770 | return len; |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1771 | |
Sergey Senozhatsky | fcfa8d9 | 2014-04-07 15:38:20 -0700 | [diff] [blame] | 1772 | out_free_meta: |
Minchan Kim | 6cb8954 | 2017-05-03 14:55:47 -0700 | [diff] [blame] | 1773 | zram_meta_free(zram, disksize); |
| 1774 | out_unlock: |
| 1775 | up_write(&zram->init_lock); |
Sergey Senozhatsky | b7ca232 | 2014-04-07 15:38:12 -0700 | [diff] [blame] | 1776 | return err; |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1777 | } |
| 1778 | |
| 1779 | static ssize_t reset_store(struct device *dev, |
| 1780 | struct device_attribute *attr, const char *buf, size_t len) |
| 1781 | { |
| 1782 | int ret; |
| 1783 | unsigned short do_reset; |
| 1784 | struct zram *zram; |
| 1785 | struct block_device *bdev; |
| 1786 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1787 | ret = kstrtou16(buf, 10, &do_reset); |
| 1788 | if (ret) |
| 1789 | return ret; |
| 1790 | |
| 1791 | if (!do_reset) |
| 1792 | return -EINVAL; |
| 1793 | |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1794 | zram = dev_to_zram(dev); |
| 1795 | bdev = bdget_disk(zram->disk, 0); |
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 1796 | if (!bdev) |
| 1797 | return -ENOMEM; |
| 1798 | |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1799 | mutex_lock(&bdev->bd_mutex); |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1800 | /* Do not reset an active device or claimed device */ |
| 1801 | if (bdev->bd_openers || zram->claim) { |
| 1802 | mutex_unlock(&bdev->bd_mutex); |
| 1803 | bdput(bdev); |
| 1804 | return -EBUSY; |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1805 | } |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1806 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1807 | 	/* From now on, no one can open /dev/zram[0-9] */
| 1808 | zram->claim = true; |
| 1809 | mutex_unlock(&bdev->bd_mutex); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1810 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1811 | 	/* Make sure all pending I/O is finished */
Rashika Kheria | 46a51c8 | 2013-10-30 18:36:32 +0530 | [diff] [blame] | 1812 | fsync_bdev(bdev); |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1813 | zram_reset_device(zram); |
Minchan Kim | 34a17b1 | 2017-11-15 17:32:56 -0800 | [diff] [blame] | 1814 | revalidate_disk(zram->disk); |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1815 | bdput(bdev); |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1816 | |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1817 | mutex_lock(&bdev->bd_mutex); |
| 1818 | zram->claim = false; |
Sergey Senozhatsky | ba6b17d | 2015-02-12 15:00:36 -0800 | [diff] [blame] | 1819 | mutex_unlock(&bdev->bd_mutex); |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1820 | |
| 1821 | return len; |
| 1822 | } |
| 1823 | |
| 1824 | static int zram_open(struct block_device *bdev, fmode_t mode) |
| 1825 | { |
| 1826 | int ret = 0; |
| 1827 | struct zram *zram; |
| 1828 | |
| 1829 | WARN_ON(!mutex_is_locked(&bdev->bd_mutex)); |
| 1830 | |
| 1831 | zram = bdev->bd_disk->private_data; |
 | 1832 | 	/* zram was claimed for reset, so the open request fails */
| 1833 | if (zram->claim) |
| 1834 | ret = -EBUSY; |
| 1835 | |
Rashika Kheria | 1b67222 | 2013-11-10 22:13:53 +0530 | [diff] [blame] | 1836 | return ret; |
Jerome Marchand | 8c921b2 | 2011-06-10 15:28:47 +0200 | [diff] [blame] | 1837 | } |
| 1838 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1839 | static const struct block_device_operations zram_devops = { |
Sergey Senozhatsky | f405c44 | 2015-06-25 15:00:21 -0700 | [diff] [blame] | 1840 | .open = zram_open, |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1841 | .swap_slot_free_notify = zram_slot_free_notify, |
karam.lee | 8c7f010 | 2014-12-12 16:56:53 -0800 | [diff] [blame] | 1842 | .rw_page = zram_rw_page, |
Nitin Gupta | 107c161 | 2010-05-17 11:02:44 +0530 | [diff] [blame] | 1843 | .owner = THIS_MODULE |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1844 | }; |
| 1845 | |
Andrew Morton | 99ebbd30 | 2015-05-05 16:23:25 -0700 | [diff] [blame] | 1846 | static DEVICE_ATTR_WO(compact); |
Ganesh Mahendran | 083914e | 2014-12-12 16:57:13 -0800 | [diff] [blame] | 1847 | static DEVICE_ATTR_RW(disksize); |
| 1848 | static DEVICE_ATTR_RO(initstate); |
| 1849 | static DEVICE_ATTR_WO(reset); |
Sergey Senozhatsky | f29eb69 | 2017-02-22 15:46:45 -0800 | [diff] [blame] | 1850 | static DEVICE_ATTR_WO(mem_limit); |
| 1851 | static DEVICE_ATTR_WO(mem_used_max); |
Minchan Kim | 149be47 | 2018-12-28 00:36:44 -0800 | [diff] [blame] | 1852 | static DEVICE_ATTR_WO(idle); |
Ganesh Mahendran | 083914e | 2014-12-12 16:57:13 -0800 | [diff] [blame] | 1853 | static DEVICE_ATTR_RW(max_comp_streams); |
| 1854 | static DEVICE_ATTR_RW(comp_algorithm); |
Minchan Kim | 9ac886a | 2017-09-06 16:19:54 -0700 | [diff] [blame] | 1855 | #ifdef CONFIG_ZRAM_WRITEBACK |
| 1856 | static DEVICE_ATTR_RW(backing_dev); |
Minchan Kim | 86d820b | 2018-12-28 00:36:47 -0800 | [diff] [blame] | 1857 | static DEVICE_ATTR_WO(writeback); |
Minchan Kim | 2cf97fa | 2018-12-28 00:36:54 -0800 | [diff] [blame] | 1858 | static DEVICE_ATTR_RW(writeback_limit); |
Minchan Kim | f26c1b2 | 2019-01-08 15:22:53 -0800 | [diff] [blame] | 1859 | static DEVICE_ATTR_RW(writeback_limit_enable); |
Minchan Kim | 9ac886a | 2017-09-06 16:19:54 -0700 | [diff] [blame] | 1860 | #endif |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1861 | |
| 1862 | static struct attribute *zram_disk_attrs[] = { |
| 1863 | &dev_attr_disksize.attr, |
| 1864 | &dev_attr_initstate.attr, |
| 1865 | &dev_attr_reset.attr, |
Andrew Morton | 99ebbd30 | 2015-05-05 16:23:25 -0700 | [diff] [blame] | 1866 | &dev_attr_compact.attr, |
Minchan Kim | 9ada9da | 2014-10-09 15:29:53 -0700 | [diff] [blame] | 1867 | &dev_attr_mem_limit.attr, |
Minchan Kim | 461a8ee | 2014-10-09 15:29:55 -0700 | [diff] [blame] | 1868 | &dev_attr_mem_used_max.attr, |
Minchan Kim | 149be47 | 2018-12-28 00:36:44 -0800 | [diff] [blame] | 1869 | &dev_attr_idle.attr, |
Sergey Senozhatsky | beca3ec | 2014-04-07 15:38:14 -0700 | [diff] [blame] | 1870 | &dev_attr_max_comp_streams.attr, |
Sergey Senozhatsky | e46b8a0 | 2014-04-07 15:38:17 -0700 | [diff] [blame] | 1871 | &dev_attr_comp_algorithm.attr, |
Minchan Kim | 9ac886a | 2017-09-06 16:19:54 -0700 | [diff] [blame] | 1872 | #ifdef CONFIG_ZRAM_WRITEBACK |
| 1873 | &dev_attr_backing_dev.attr, |
Minchan Kim | 86d820b | 2018-12-28 00:36:47 -0800 | [diff] [blame] | 1874 | &dev_attr_writeback.attr, |
Minchan Kim | 2cf97fa | 2018-12-28 00:36:54 -0800 | [diff] [blame] | 1875 | &dev_attr_writeback_limit.attr, |
Minchan Kim | f26c1b2 | 2019-01-08 15:22:53 -0800 | [diff] [blame] | 1876 | &dev_attr_writeback_limit_enable.attr, |
Minchan Kim | 9ac886a | 2017-09-06 16:19:54 -0700 | [diff] [blame] | 1877 | #endif |
Sergey Senozhatsky | 2f6a3be | 2015-04-15 16:16:03 -0700 | [diff] [blame] | 1878 | &dev_attr_io_stat.attr, |
Sergey Senozhatsky | 4f2109f | 2015-04-15 16:16:06 -0700 | [diff] [blame] | 1879 | &dev_attr_mm_stat.attr, |
Minchan Kim | e1dd5d1 | 2018-12-28 00:36:51 -0800 | [diff] [blame] | 1880 | #ifdef CONFIG_ZRAM_WRITEBACK |
| 1881 | &dev_attr_bd_stat.attr, |
| 1882 | #endif |
Sergey Senozhatsky | 623e47f | 2016-05-20 17:00:02 -0700 | [diff] [blame] | 1883 | &dev_attr_debug_stat.attr, |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1884 | NULL, |
| 1885 | }; |
| 1886 | |
Arvind Yadav | 15a54fc | 2017-07-10 15:50:15 -0700 | [diff] [blame] | 1887 | static const struct attribute_group zram_disk_attr_group = { |
Sergey Senozhatsky | 9b3bb7a | 2013-06-22 03:21:18 +0300 | [diff] [blame] | 1888 | .attrs = zram_disk_attrs, |
| 1889 | }; |
| 1890 | |
Minchan Kim | 553a561 | 2018-11-23 15:28:02 +0900 | [diff] [blame] | 1891 | static const struct attribute_group *zram_disk_attr_groups[] = { |
| 1892 | &zram_disk_attr_group, |
| 1893 | NULL, |
| 1894 | }; |
| 1895 | |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1896 | /* |
 | 1897 |  * Allocate and initialize a new zram device. The function returns
 | 1898 |  * a '>= 0' device_id upon success, and a negative value otherwise.
| 1899 | */ |
| 1900 | static int zram_add(void) |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1901 | { |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1902 | struct zram *zram; |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1903 | struct request_queue *queue; |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1904 | int ret, device_id; |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1905 | |
| 1906 | zram = kzalloc(sizeof(struct zram), GFP_KERNEL); |
| 1907 | if (!zram) |
| 1908 | return -ENOMEM; |
| 1909 | |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1910 | ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1911 | if (ret < 0) |
| 1912 | goto out_free_dev; |
Sergey Senozhatsky | 92ff152 | 2015-06-25 15:00:19 -0700 | [diff] [blame] | 1913 | device_id = ret; |
Nitin Gupta | de1a21a | 2010-01-28 21:13:40 +0530 | [diff] [blame] | 1914 | |
Jerome Marchand | 0900bea | 2011-09-06 15:02:11 +0200 | [diff] [blame] | 1915 | init_rwsem(&zram->init_lock); |
Minchan Kim | f26c1b2 | 2019-01-08 15:22:53 -0800 | [diff] [blame] | 1916 | #ifdef CONFIG_ZRAM_WRITEBACK |
| 1917 | spin_lock_init(&zram->wb_limit_lock); |
| 1918 | #endif |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1919 | queue = blk_alloc_queue(GFP_KERNEL); |
| 1920 | if (!queue) { |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1921 | pr_err("Error allocating disk queue for device %d\n", |
| 1922 | device_id); |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1923 | ret = -ENOMEM; |
| 1924 | goto out_free_idr; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1925 | } |
| 1926 | |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1927 | blk_queue_make_request(queue, zram_make_request); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1928 | |
Sergey Senozhatsky | 85508ec | 2015-06-25 15:00:06 -0700 | [diff] [blame] | 1929 | /* gendisk structure */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1930 | zram->disk = alloc_disk(1); |
| 1931 | if (!zram->disk) { |
Sergey Senozhatsky | 7086496 | 2015-09-08 15:04:58 -0700 | [diff] [blame] | 1932 | pr_err("Error allocating disk structure for device %d\n", |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1933 | device_id); |
Julia Lawall | 201c7b7 | 2015-04-15 16:16:27 -0700 | [diff] [blame] | 1934 | ret = -ENOMEM; |
Jiang Liu | 39a9b8a | 2013-06-07 00:07:24 +0800 | [diff] [blame] | 1935 | goto out_free_queue; |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1936 | } |
| 1937 | |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1938 | zram->disk->major = zram_major; |
| 1939 | zram->disk->first_minor = device_id; |
| 1940 | zram->disk->fops = &zram_devops; |
Sergey Senozhatsky | ee980160 | 2015-02-12 15:00:48 -0800 | [diff] [blame] | 1941 | zram->disk->queue = queue; |
| 1942 | zram->disk->queue->queuedata = zram; |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1943 | zram->disk->private_data = zram; |
| 1944 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); |
Nitin Gupta | 306b0c9 | 2009-09-22 10:26:53 +0530 | [diff] [blame] | 1945 | |
Nitin Gupta | 33863c2 | 2010-08-09 22:56:47 +0530 | [diff] [blame] | 1946 | 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1947 | set_capacity(zram->disk, 0); |
Sergey Senozhatsky | b67d1ec | 2014-04-07 15:38:09 -0700 | [diff] [blame] | 1948 | 	/* zram devices sort of resemble non-rotational disks */
| 1949 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); |
Mike Snitzer | b277da0 | 2014-10-04 10:55:32 -0600 | [diff] [blame] | 1950 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); |
Minchan Kim | 34a17b1 | 2017-11-15 17:32:56 -0800 | [diff] [blame] | 1951 | |
Nitin Gupta | a1dd52a | 2010-06-01 13:31:23 +0530 | [diff] [blame] | 1952 | /* |
| 1953 | * To ensure that we always get PAGE_SIZE aligned |
 | 1954 |  * and n*PAGE_SIZE sized I/O requests.
| 1955 | */ |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1956 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
Robert Jennings | 7b19b8d | 2011-01-28 08:58:17 -0600 | [diff] [blame] | 1957 | blk_queue_logical_block_size(zram->disk->queue, |
| 1958 | ZRAM_LOGICAL_BLOCK_SIZE); |
Nitin Gupta | f1e3cff | 2010-06-01 13:31:25 +0530 | [diff] [blame] | 1959 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
| 1960 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1961 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
Jens Axboe | 2bb4cd5 | 2015-07-14 08:15:12 -0600 | [diff] [blame] | 1962 | blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); |
Joonsoo Kim | f4659d8 | 2014-04-07 15:38:24 -0700 | [diff] [blame] | 1963 | /* |
| 1964 | * zram_bio_discard() will clear all logical blocks if logical block |
 | 1965 |  * size is identical to the physical block size (PAGE_SIZE). But if it is
| 1966 | * different, we will skip discarding some parts of logical blocks in |
| 1967 | * the part of the request range which isn't aligned to physical block |
| 1968 | * size. So we can't ensure that all discarded logical blocks are |
| 1969 | * zeroed. |
| 1970 | */ |
| 1971 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) |
| 1972 | zram->disk->queue->limits.discard_zeroes_data = 1; |
| 1973 | else |
| 1974 | zram->disk->queue->limits.discard_zeroes_data = 0; |
| 1975 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); |
Nitin Gupta | 5d83d5a | 2010-01-28 21:13:39 +0530 | [diff] [blame] | 1976 | |
Minchan Kim | 34a17b1 | 2017-11-15 17:32:56 -0800 | [diff] [blame] | 1977 | zram->disk->queue->backing_dev_info.capabilities |= |
| 1978 | BDI_CAP_STABLE_WRITES; |

	disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
	add_disk(zram->disk);

	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);
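
	/*
	 * With ->claim set, the open path (zram_open(), earlier in this
	 * file) refuses new openers, so the reset and teardown below
	 * cannot race with fresh I/O against this device.
	 */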

	zram_debugfs_unregister(zram);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);
	blk_cleanup_queue(zram->disk->queue);
	put_disk(zram->disk);
	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
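
/*
 * Example usage (device id illustrative): reading hot_add creates the
 * next available device and prints its id:
 *
 *   # cat /sys/class/zram-control/hot_add
 *   2
 *
 * after which /dev/zram2 exists.
 */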

static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
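
/*
 * Example usage (device id illustrative): writing an id removes that
 * device, failing with -EBUSY if it is still held open:
 *
 *   # echo 2 > /sys/class/zram-control/hot_remove
 */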

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute, in the
 * sense that reading from this file does alter the state of the
 * system -- it creates a new uninitialized zram device and returns
 * that device's device_id (or an error code if it fails to create
 * a new device).
 */
static struct class_attribute zram_control_class_attrs[] = {
	__ATTR(hot_add, 0400, hot_add_show, NULL),
	__ATTR_WO(hot_remove),
	__ATTR_NULL,
};

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_attrs	= zram_control_class_attrs,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
}
static int __init zram_init(void)
{
	int ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		zram_debugfs_destroy();	/* undo zram_debugfs_create() above */
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
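
/*
 * Example (parameter value illustrative):
 *
 *   # modprobe zram num_devices=4
 *
 * pre-creates /dev/zram0 .. /dev/zram3; further devices can be added
 * later through /sys/class/zram-control/hot_add.
 */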

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");