/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set *bounce_bio_set, *bounce_bio_split;
static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
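/*
 * Set up the emergency page pool and the bio sets used for bouncing.
 * Runs once at boot; on a HIGHMEM build with no highmem pages present
 * (and no memory hotplug to add any), there is nothing to bounce, so
 * bail out early.
 */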
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(!bounce_bio_set);
	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
	BUG_ON(!bounce_bio_split);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map into the vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy; bounce_copy_vec already uses tovec.bv_len.
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}

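/*
 * Common completion handling for a bounced bio: release the bounce pages
 * back to the mempool, propagate the completion status to the original
 * bio, end it, and drop the bounce clone.
 */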
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;
	int start = bio_orig->bi_iter.bi_idx;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i + start;

		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, isa_page_pool);
}

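/*
 * Read completion: if the transfer succeeded, copy the data from the
 * bounce pages back into the original (possibly highmem) pages before
 * running the common completion path.
 */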
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, isa_page_pool);
}

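/*
 * Clone the bio and replace any segment that sits above the queue's
 * bounce_pfn limit with a freshly allocated lowmem page from @pool.
 * For writes the payload is copied into the bounce page up front; for
 * reads it is copied back on completion.  The caller's bio pointer is
 * redirected to the bounce clone.
 */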
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

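	/*
	 * A bounce clone can hold at most BIO_MAX_PAGES segments.  If the
	 * original bio is bigger, split off the front portion that fits,
	 * chain it, and resubmit the remainder; only the front part is
	 * bounced here.
	 */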
	if (sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

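		/*
		 * Writes need the payload in the bounce page before the bio
		 * is submitted; reads are copied back in the completion
		 * handler instead.
		 */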
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

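/*
 * Bounce any bio segments that the queue cannot address directly.  If
 * bouncing is needed, *bio_orig is replaced with a bounce clone whose
 * completion is propagated back to the original bio.
 */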
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn is
	 * equal to or bigger than the highest pfn in the system; in that
	 * case, don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}