// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set *bounce_bio_set, *bounce_bio_split;
static mempool_t *page_pool, *isa_page_pool;
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(!bounce_bio_set);
	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
	BUG_ON(!bounce_bio_split);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: map the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)				\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Called each time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; it will do the Right Thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio of @from is created by bounce, so we can iterate
	 * its bvec from start to end, but @from->bi_iter can't be
	 * trusted because it might have been changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the
			 * original copy; bounce_copy_vec already uses
			 * tovec->bv_len.
			 */
			vfrom = page_address(fromvec.bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}

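/*
 * Common completion path: release any bounce pages back to @pool,
 * propagate the completion status to the original bio, and free the
 * bounce clone.
 */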
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	int i;
	struct bvec_iter orig_iter = bio_orig->bi_iter;

	/*
	 * free up the bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, isa_page_pool);
}

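/*
 * Read completion: if the I/O succeeded, copy the bounced data back
 * into the original (possibly highmem) pages before completing the
 * original bio.
 */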
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, isa_page_pool);
}

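/*
 * Bounce any segment whose page sits above q->limits.bounce_pfn into a
 * page allocated from @pool.  The bio is split first if it covers more
 * than BIO_MAX_PAGES segments, then cloned; write payloads are copied
 * into the bounce pages up front, while reads are copied back by the
 * completion handler.
 */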
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
		mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;
	bool passthrough = bio_is_passthrough(*bio_orig);

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

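	/*
	 * The bounce clone below can cover at most BIO_MAX_PAGES segments,
	 * so split off the front of an oversized bio, resubmit the
	 * remainder, and bounce only the front; passthrough bios are
	 * never split.
	 */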
	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
			bounce_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

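		/*
		 * For a write, copy the payload into the bounce page now;
		 * for a read, __bounce_end_io_read() copies it back on
		 * completion.
		 */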
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn
	 * is equal to or bigger than the highest pfn in the system; in
	 * that case, don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}