/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * Maximum number of segments allowed in an indirect request. This
 * value is also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
        (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME \
        (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)

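/*
 * Worked example (illustrative): struct blkif_request_segment is 8
 * bytes, so XEN_PAGES_PER_INDIRECT_FRAME is 4096 / 8 = 512.  A guest
 * with 4K pages (XEN_PAGES_PER_SEGMENT == 1) therefore fits 512
 * segments per indirect frame and needs a single indirect page for
 * MAX_INDIRECT_SEGMENTS == 256; a guest with 64K pages
 * (XEN_PAGES_PER_SEGMENT == 16) fits 512 / 16 = 32 segments per
 * frame, so MAX_INDIRECT_PAGES works out to (256 + 31) / 32 = 8.
 */
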
/*
 * Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.
 */
struct blkif_common_request {
        char dummy;
};
struct blkif_common_response {
        char dummy;
};

struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk             */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk             */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t        operation;       /* copied from request */
        int16_t        status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);

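/*
 * Each DEFINE_RING_TYPES() invocation above (see
 * xen/interface/io/ring.h) generates the shared-memory ring layout
 * (e.g. struct blkif_x86_32_sring) plus the front and back ring
 * bookkeeping structures (e.g. struct blkif_x86_32_back_ring); the
 * back rings are what the union below carries.
 */
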
union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif

struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used; the flag is set when we clear
 * PERSISTENT_GNT_ACTIVE, to record that the grant was used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1

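/*
 * Rough lifecycle sketch (the actual handling lives in blkback.c):
 * a grant is flagged active while a request is using it and demoted
 * to "was active" on completion, so the LRU purge can skip grants
 * that were used since its last scan.  Roughly:
 *
 *	set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);
 *	... request in flight ...
 *	clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);
 *	set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);
 */
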
/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE         32

struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* Private fields. */
        spinlock_t              blk_ring_lock;

        wait_queue_head_t       wq;
        atomic_t                inflight;
        /* One thread per blkif ring. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* Tree to store persistent grants. */
        spinlock_t              pers_gnts_lock;
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;

        /* Statistics. */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        /* Used by the kworker that offloads work from the persistent purge. */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* Pool of free pages used to map grant refs. */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
        struct xen_blkif        *blkif;
};

struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        atomic_t                refcnt;
        /* For barrier (drain) requests. */
        struct completion       drain_complete;
        atomic_t                drain;

        struct work_struct      free_work;
        unsigned int            nr_ring_pages;
        /* All rings for this device. */
        struct xen_blkif_ring   *rings;
        unsigned int            nr_rings;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif_ring   *ring;
        u64                     id;
        int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
        struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
        struct page             *unmap_pages[MAX_INDIRECT_SEGMENTS];
        struct gntab_unmap_queue_data gnttab_unmap_data;
};

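/*
 * Illustrative completion path (a sketch; the real logic lives in
 * blkback.c): each completing bio drops one count, and whoever drops
 * the last one unmaps the grants and queues the response:
 *
 *	if (atomic_dec_and_test(&pending_req->pendcnt)) {
 *		... unmap grants, put the saved id on the ring ...
 *	}
 */
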
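/*
 * Size of the VBD in 512-byte sectors: the partition's sector count
 * when the VBD maps a partition, otherwise the whole disk's capacity.
 */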
#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                         get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        schedule_work(&(_b)->free_work);\
        } while (0)

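/*
 * Reference-counting sketch (illustrative) for the pair above: take a
 * reference for as long as a ring or request may touch the blkif; the
 * final xen_blkif_put() schedules free_work so the actual teardown
 * runs later in process context:
 *
 *	xen_blkif_get(blkif);
 *	... hand the blkif to the xenblkd kthread ...
 *	xen_blkif_put(blkif);	(last put schedules blkif->free_work)
 */
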
struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

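/*
 * Consumer sketch (illustrative; this mirrors how the backend drains
 * the ring in blkback.c): each request is copied off the shared ring
 * with the translator matching the negotiated ABI:
 *
 *	struct blkif_request req;
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *		       sizeof(req));
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		blkif_get_x86_32_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		blkif_get_x86_64_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *		break;
 *	}
 */
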
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */