/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

#define SEGS_PER_INDIRECT_FRAME \
        (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
        ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

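/*
 * A quick sanity check of the arithmetic above (a sketch assuming 4 KiB
 * pages and the 8-byte struct blkif_request_segment from
 * <xen/interface/io/blkif.h>): SEGS_PER_INDIRECT_FRAME = 4096 / 8 = 512,
 * so MAX_INDIRECT_PAGES = (256 + 511) / 512 = 1 and INDIRECT_PAGES(256)
 * likewise evaluates to 1: a single indirect grant page is enough to
 * describe the largest supported request.
 */
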
/* Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
        char dummy;
};
struct blkif_common_response {
        char dummy;
};

struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS, this value
         * is also exported to the guest (via xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));

/* i386 protocol version */
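/*
 * The i386 ABI aligns uint64_t on a 4-byte boundary, so a 32-bit frontend
 * lays this response out in 12 bytes.  Packing to 4 stops a 64-bit backend
 * from aligning @id to 8 bytes and padding the struct to 16, which would
 * misparse the shared ring.
 */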
#pragma pack(push, 4)
struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS, this value
         * is also exported to the guest (via xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t        operation;       /* copied from request */
        int16_t        status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);
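/*
 * Each DEFINE_RING_TYPES(name, req_t, rsp_t) invocation (from
 * <xen/interface/io/ring.h>) expands to struct name_sring, the shared
 * page layout whose entries are a union of req_t and rsp_t, plus the
 * struct name_front_ring/name_back_ring bookkeeping types used below.
 */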

union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
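/*
 * On x86 an old 32-bit frontend may not write the "protocol" xenstore
 * node at all; treating silence as the 32-bit ABI keeps such guests
 * working, while newer frontends (including 64-bit ones) advertise
 * their ABI explicitly.
 */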

struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used, this flag is set when we remove the
 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1

/* Number of requests that we can fit in a single ring page */
#define XEN_BLKIF_REQS_PER_PAGE         32
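/*
 * With 4 KiB pages a ring page holds 32 request/response slots, so a
 * backend that negotiates nr_ring_pages ring pages is expected to
 * allocate XEN_BLKIF_REQS_PER_PAGE * nr_ring_pages pending_req structs
 * to keep the whole ring in flight.
 */
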
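/*
 * Persistent grants are kept in the per-blkif rb-tree below, keyed by
 * grant reference.  PERSISTENT_GNT_ACTIVE is set while a request has the
 * grant mapped; PERSISTENT_GNT_WAS_ACTIVE lets the LRU purge worker skip
 * grants used since the last scan and reclaim only the cold ones.
 */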
struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};

struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        /* Private fields. */
        spinlock_t              blk_ring_lock;
        atomic_t                refcnt;

        wait_queue_head_t       wq;
        /* for barrier (drain) requests */
        struct completion       drain_complete;
        atomic_t                drain;
        atomic_t                inflight;
        /* One thread per blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* tree to store persistent grants */
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;

        /* used by the kworker that offloads work from the persistent purge */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* buffer of free pages to map grant refs */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* statistics */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
        unsigned int            nr_ring_pages;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
        int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
        struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
        struct page             *unmap_pages[MAX_INDIRECT_SEGMENTS];
        struct gntab_unmap_queue_data gnttab_unmap_data;
};

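/*
 * Size of the vbd in 512-byte sectors: the partition length when the
 * underlying block_device is a partition, otherwise the whole-disk
 * capacity.
 */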
#define vbd_sz(_v)      ((_v)->bdev->bd_part ?                  \
                         (_v)->bdev->bd_part->nr_sects :        \
                         get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                                       \
        do {                                                    \
                if (atomic_dec_and_test(&(_b)->refcnt))         \
                        schedule_work(&(_b)->free_work);        \
        } while (0)
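/*
 * Note that the final xen_blkif_put() only schedules free_work instead of
 * tearing the interface down inline, so the actual cleanup (which may
 * sleep) runs in process context rather than in whatever context dropped
 * the last reference.
 */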

struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif *blkif);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

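/*
 * The two helpers below copy a request from a foreign-ABI shared ring
 * into the backend's native struct blkif_request.  The barrier() after
 * the fixed fields makes sure the private copy of nr_segments is complete
 * before it is used to bound the segment copy, so a frontend rewriting
 * the shared entry under us cannot widen the loop.  A sketch of the
 * expected caller pattern (cf. __do_block_io_op() in blkback.c; rc is the
 * ring consumer index):
 *
 *      struct blkif_request req;
 *
 *      switch (blkif->blk_protocol) {
 *      case BLKIF_PROTOCOL_NATIVE:
 *              memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *                     sizeof(req));
 *              break;
 *      case BLKIF_PROTOCOL_X86_32:
 *              blkif_get_x86_32_req(&req,
 *                              RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *              break;
 *      case BLKIF_PROTOCOL_X86_64:
 *              blkif_get_x86_64_req(&req,
 *                              RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *              break;
 *      }
 */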
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */