// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set *bounce_bio_set, *bounce_bio_split;
static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
        if (max_pfn <= max_low_pfn)
                return 0;
#endif

        page_pool = mempool_create_page_pool(POOL_SIZE, 0);
        BUG_ON(!page_pool);
        pr_info("pool size: %d pages\n", POOL_SIZE);

        bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        BUG_ON(!bounce_bio_set);
        if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
                BUG_ON(1);

        bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
        BUG_ON(!bounce_bio_split);

        return 0;
}

__initcall(init_emergency_pool);
#endif
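
/*
 * A minimal sketch (hypothetical caller, for illustration) of the
 * mempool pattern the bounce path relies on: with a reclaim-capable
 * mask such as GFP_NOIO, mempool_alloc() falls back to the reserve
 * pre-allocated above instead of failing, which is what makes these
 * "emergency" pools.
 *
 *	struct page *page = mempool_alloc(page_pool, GFP_NOIO);
 *	// ... use the page as a bounce target ...
 *	mempool_free(page, page_pool);
 */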

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: kmap the destination bio_vec and copy vfrom into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
        unsigned long flags;
        unsigned char *vto;

        local_irq_save(flags);
        vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
        kunmap_atomic(vto);
        local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
        memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
        return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called "every" time someone initializes a queue with
 * BLK_BOUNCE_ISA as the max address, so check whether the pool has
 * already been created.
 */
int init_emergency_isa_pool(void)
{
        if (isa_page_pool)
                return 0;

        isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                                       mempool_free_pages, (void *) 0);
        BUG_ON(!isa_page_pool);

        pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
        return 0;
}
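
/*
 * How this is typically reached (illustrative; exact call paths vary):
 * a driver restricted to 24-bit ISA DMA calls
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * which sets q->bounce_gfp to GFP_NOIO | GFP_DMA and calls
 * init_emergency_isa_pool() along the way.
 */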

/*
 * Simple bounce-buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page; kmap it
 * unconditionally and it will do the right thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
        unsigned char *vfrom;
        struct bio_vec tovec, *fromvec = from->bi_io_vec;
        struct bvec_iter iter;

        bio_for_each_segment(tovec, to, iter) {
                if (tovec.bv_page != fromvec->bv_page) {
                        /*
                         * fromvec->bv_offset and fromvec->bv_len might have
                         * been modified by the block layer, so use the
                         * original copy; bounce_copy_vec already uses
                         * tovec.bv_len.
                         */
                        vfrom = page_address(fromvec->bv_page) +
                                tovec.bv_offset;

                        bounce_copy_vec(&tovec, vfrom);
                        flush_dcache_page(tovec.bv_page);
                }

                fromvec++;
        }
}

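/*
 * Free the bounce pages attached to @bio, propagate its completion
 * status to the original bio and complete that, then release @bio.
 */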
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
        int i;
        int start = bio_orig->bi_iter.bi_idx;

        /*
         * free up the bounce pages we used
         */
        bio_for_each_segment_all(bvec, bio, i) {
                org_vec = bio_orig->bi_io_vec + i + start;

                if (bvec->bv_page == org_vec->bv_page)
                        continue;

                dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
                mempool_free(bvec->bv_page, pool);
        }

        bio_orig->bi_status = bio->bi_status;
        bio_endio(bio_orig);
        bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
        bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
        bounce_end_io(bio, isa_page_pool);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;

        if (!bio->bi_status)
                copy_to_high_bio_irq(bio_orig, bio);

        bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
        __bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
        __bounce_end_io_read(bio, isa_page_pool);
}

static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                               mempool_t *pool)
{
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
        struct bio_vec *to, from;
        struct bvec_iter iter;
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;

        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
                        sectors += from.bv_len >> 9;
                if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
                        bounce = true;
        }
        if (!bounce)
                return;

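        /*
         * A clone allocated from bounce_bio_set can hold at most
         * BIO_MAX_PAGES bvecs, so split off and resubmit whatever lies
         * beyond the first BIO_MAX_PAGES segments.
         */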
        if (sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
        bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);

        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;

                if (page_to_pfn(page) <= q->limits.bounce_pfn)
                        continue;

                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
                inc_zone_page_state(to->bv_page, NR_BOUNCE);

                if (rw == WRITE) {
                        char *vto, *vfrom;

                        flush_dcache_page(page);

                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap_atomic(page) + to->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
                        kunmap_atomic(vfrom);
                }
        }

        trace_block_bio_bounce(q, *bio_orig);

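        /* mark the clone as bounced; some of its pages come from the bounce pool */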
        bio->bi_flags |= (1 << BIO_BOUNCED);

        if (pool == page_pool) {
                bio->bi_end_io = bounce_end_io_write;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read;
        } else {
                bio->bi_end_io = bounce_end_io_write_isa;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read_isa;
        }

        bio->bi_private = *bio_orig;
        *bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
        mempool_t *pool;

        /*
         * Data-less bio, nothing to bounce
         */
        if (!bio_has_data(*bio_orig))
                return;

        /*
         * For the non-ISA bounce case, just check whether the bounce pfn
         * is equal to or bigger than the highest pfn in the system; in
         * that case, don't waste time iterating over bio segments.
         */
        if (!(q->bounce_gfp & GFP_DMA)) {
                if (q->limits.bounce_pfn >= blk_max_pfn)
                        return;
                pool = page_pool;
        } else {
                BUG_ON(!isa_page_pool);
                pool = isa_page_pool;
        }

        /*
         * slow path
         */
        __blk_queue_bounce(q, bio_orig, pool);
}
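
/*
 * Usage sketch: the real call site in this era of the block layer is
 * the request_queue's make_request path (e.g. blk_queue_bio());
 * my_make_request below is hypothetical and shown for illustration.
 *
 *	static blk_qc_t my_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		blk_queue_bounce(q, &bio);
 *		// every data page of *bio now lies at or below
 *		// q->limits.bounce_pfn
 *		...
 *	}
 *
 * Note that blk_queue_bounce() may replace the caller's bio with a
 * bounce clone, which is why it takes a struct bio **.
 */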