/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

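/*
 * Simple wrappers for the per-device statistics: the 32-bit counters are
 * updated with plain loads and stores, while the 64-bit counters are
 * serialized with stat64_lock so their updates stay consistent.
 */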
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

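/* Return 1 if the page at @ptr contains only zero words, 0 otherwise. */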
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

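/*
 * Set the disk size: default to a percentage of total RAM if none was
 * given, warn when it exceeds twice the RAM size, and round down to a
 * page boundary.
 */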
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

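/*
 * Release whatever backs table entry @index: a zero page only needs its
 * flag cleared, an uncompressed page is freed whole, and a compressed
 * object is returned to the xvmalloc pool. Stats are updated to match.
 */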
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

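/* Satisfy a read of a zero-filled page by clearing the destination page. */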
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

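/* Copy a page that was stored uncompressed straight into the bio page. */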
static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}

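/*
 * Read handler: for each segment of the bio, zero pages are synthesized,
 * uncompressed pages are copied and everything else is LZO-decompressed
 * into the destination page.
 */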
static void zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		bio_endio(bio, -ENXIO);
		return;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

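/*
 * Write handler: free any stale data for the sector, detect zero-filled
 * pages, LZO-compress the rest and fall back to storing a page
 * uncompressed when it does not compress below max_zpage_size.
 */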
static void zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors which have the side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		zram_read(zram, bio);
		break;

	case WRITE:
		zram_write(zram, bio);
		break;
	}

	return 0;
}

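/*
 * Return the device to its uninitialized state: free the compression
 * buffers, every stored page, the table and the memory pool, and clear
 * the statistics.
 */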
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}

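/*
 * Lazily set up everything needed to serve I/O: compression workmem and
 * buffer, the page table and the xvmalloc pool. Protected by init_lock
 * and safe to call more than once.
 */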
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

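/*
 * Called when a swap slot on this device is freed so the backing memory
 * can be released immediately instead of waiting for an overwrite.
 */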
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

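/*
 * Allocate and register the request queue, gendisk and sysfs attribute
 * group for a single zram device.
 */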
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

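/* Tear down the sysfs group, gendisk and request queue created above. */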
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

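/*
 * Module init: register the block major, allocate the device array and
 * create each zram device. Per-device memory is only allocated later,
 * by zram_init_device().
 */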
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");