/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>

#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x)   ((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x) 0
#define BIOVEC_VIRT_OVERSIZE(x)   0
#endif

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY 0
#endif

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES   (256)
#define BIO_MAX_SIZE    (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
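/*
 * Worked example (editor's note, not part of the original header): on an
 * architecture with 4 kB pages (PAGE_CACHE_SHIFT == 12) these limits
 * evaluate to
 *
 *      BIO_MAX_SIZE    = 256 << 12 = 1048576 bytes (1 MB)
 *      BIO_MAX_SECTORS = 1048576 >> 9 = 2048 512-byte sectors
 */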

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
        struct page     *bv_page;
        unsigned int    bv_len;
        unsigned int    bv_offset;
};

struct bio_set;
struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
        sector_t                bi_sector;
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
        unsigned long           bi_rw;          /* bottom bits READ/WRITE,
                                                 * top bits priority
                                                 */

        unsigned short          bi_vcnt;        /* how many bio_vec's */
        unsigned short          bi_idx;         /* current index into bvl_vec */

        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned short          bi_phys_segments;

        /* Number of segments after physical and DMA remapping
         * hardware coalescing is performed.
         */
        unsigned short          bi_hw_segments;

        unsigned int            bi_size;        /* residual I/O count */

        /*
         * To keep track of the max hw size, we account for the
         * sizes of the first and last virtually mergeable segments
         * in this bio
         */
        unsigned int            bi_hw_front_size;
        unsigned int            bi_hw_back_size;

        unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        bio_end_io_t            *bi_end_io;
        atomic_t                bi_cnt;         /* pin count */

        void                    *bi_private;

        bio_destructor_t        *bi_destructor; /* destructor */
        struct bio_set          *bi_set;        /* memory pools set */
};
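/*
 * Example (editor's sketch, not part of the original header): a freshly
 * built bio carrying two full pages toward sector 0 would look roughly
 * like
 *
 *      bio->bi_sector = 0;
 *      bio->bi_vcnt   = 2;             two bio_vecs in bi_io_vec[]
 *      bio->bi_idx    = 0;             nothing consumed yet
 *      bio->bi_size   = 2 * PAGE_SIZE; residual byte count
 *
 * As parts of the request complete, bi_idx can advance and bi_size
 * shrinks, so the accessors below always describe what is still pending.
 */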

/*
 * bio flags
 */
#define BIO_UPTODATE    0       /* ok after I/O completion */
#define BIO_RW_BLOCK    1       /* RW_AHEAD set, and read/write would block */
#define BIO_EOF         2       /* out-of-bounds error */
#define BIO_SEG_VALID   3       /* nr_hw_seg valid */
#define BIO_CLONED      4       /* doesn't own data */
#define BIO_BOUNCED     5       /* bio is a bounce bio */
#define BIO_USER_MAPPED 6       /* contains user pages */
#define BIO_EOPNOTSUPP  7       /* not supported */
#define bio_flagged(bio, flag)  ((bio)->bi_flags & (1 << (flag)))
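/*
 * Example (editor's sketch, not part of the original header): completion
 * handlers typically test these bits via bio_flagged(), e.g.
 *
 *      if (!bio_flagged(bio, BIO_UPTODATE))
 *              handle_io_error(bio);
 *
 * where handle_io_error() is a hypothetical driver routine.
 */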

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS       (4)
#define BIO_POOL_OFFSET     (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK       (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)   ((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 */
#define BIO_RW          0
#define BIO_RW_AHEAD    1
#define BIO_RW_BARRIER  2
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC     4
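/*
 * Example (editor's sketch, not part of the original header): a
 * synchronous write that should be dispatched without waiting for an
 * unplug would be set up as
 *
 *      bio->bi_rw = (1 << BIO_RW) | (1 << BIO_RW_SYNC);
 *
 * while a plain read leaves bit 0 clear.
 */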

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)          bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)           bio_iovec((bio))->bv_page
#define bio_offset(bio)         bio_iovec((bio))->bv_offset
#define bio_segments(bio)       ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)        ((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)    (bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)           (page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)        ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)           ((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)       ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)       ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
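/*
 * Example (editor's sketch, not part of the original header): for a bio
 * known to map lowmem pages, the current segment can be addressed
 * directly:
 *
 *      char *buf = bio_data(bio);
 *      unsigned int len = bio_cur_sectors(bio) << 9;
 *
 * For highmem pages, use the kmap helpers below instead.
 */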

/*
 * will die
 */
#define bio_to_phys(bio)    (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)    (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For a
 * permanent PIO fallback, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)                             \
        (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +    \
                bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
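/*
 * Example (editor's sketch, not part of the original header): copying the
 * current segment out of a possibly-highmem page:
 *
 *      char *p = __bio_kmap_atomic(bio, bio->bi_idx, KM_USER0);
 *      memcpy(dst, p, bio_iovec(bio)->bv_len);
 *      __bio_kunmap_atomic(p, KM_USER0);
 *
 * The kmap slot (KM_USER0 here) must match between map and unmap, and
 * the code must not sleep in between.
 */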

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)     bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)   bio_iovec_idx((bio), (bio)->bi_idx)

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)       \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)       \
        ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
        BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
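/*
 * Worked example (editor's note, not part of the original header): two
 * vecs are physically mergeable when the first ends exactly where the
 * second begins, e.g. vec1 at physical 0x1000 with bv_len 0x1000 merges
 * with vec2 at physical 0x2000.  BIOVEC_SEG_BOUNDARY additionally checks
 * that the combined range stays within the queue's segment boundary:
 * with seg_boundary_mask == 0xfff, the range 0x1800..0x2000 passes but
 * 0x1800..0x2400 fails, since it crosses the 0x2000 boundary.
 */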

#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)                  \
        for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
             i < (bio)->bi_vcnt;                                        \
             bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)                               \
        __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
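/*
 * Example (editor's sketch, not part of the original header): walking the
 * pending segments, e.g. to build a scatter-gather list:
 *
 *      struct bio_vec *bvec;
 *      int i;
 *
 *      bio_for_each_segment(bvec, bio, i)
 *              setup_sg_entry(bvec->bv_page, bvec->bv_len,
 *                             bvec->bv_offset);
 *
 * setup_sg_entry() is a hypothetical driver helper, shown only to
 * illustrate the fields each iteration exposes.
 */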

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)    atomic_inc(&(bio)->bi_cnt)


/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
        struct bio      bio1, bio2;
        struct bio_vec  bv1, bv2;
        atomic_t        cnt;
        int             error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
                                  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
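/*
 * Example (editor's sketch, not part of the original header): a driver
 * that must split a single-page bio at a chunk boundary might do
 *
 *      struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
 *
 *      generic_make_request(&bp->bio1);
 *      generic_make_request(&bp->bio2);
 *      bio_pair_release(bp);
 *
 * where first_sectors is however many sectors fit before the boundary;
 * the pair reference is dropped once both halves are submitted.
 */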

extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(unsigned int __nocast, int);
extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, unsigned int __nocast);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int);
extern void bio_unmap_user(struct bio *);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
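/*
 * Example (editor's sketch, not part of the original header): the usual
 * sequence for building and submitting a one-page read:
 *
 *      struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *      bio->bi_bdev    = bdev;
 *      bio->bi_sector  = sector;
 *      bio->bi_end_io  = my_end_io;    hypothetical bio_end_io_t
 *      bio->bi_private = my_cookie;
 *      if (!bio_add_page(bio, page, PAGE_SIZE, 0))
 *              the queue's restrictions prevented adding the page
 *      submit_bio(READ, bio);
 *
 * bio_add_page() returns the number of bytes added, so 0 means failure.
 * Completion is reported through bi_end_io, which typically checks
 * BIO_UPTODATE and drops its reference with bio_put().
 */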

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 * Hence the `extern inline'.
 */
extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)  (page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
#endif
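/*
 * Example (editor's sketch, not part of the original header): touching a
 * bvec safely from any context, including interrupt handlers:
 *
 *      unsigned long flags;
 *      char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *      memset(buf, 0, bvec->bv_len);
 *      bvec_kunmap_irq(buf, &flags);
 *
 * Interrupts stay disabled between the two calls, which is exactly why
 * they must not be re-enabled inside the critical section.
 */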

extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags) __bio_kunmap_irq(buf, flags)

#endif /* __LINUX_BIO_H */