/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT	(PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)

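/*
 * Worked example (illustrative arithmetic, assuming 4K Xen pages and
 * sizeof(struct blkif_request_segment) == 8): each indirect frame holds
 * XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8 = 512 segment entries.  With a
 * 4K guest PAGE_SIZE, XEN_PAGES_PER_SEGMENT = 1, so SEGS_PER_INDIRECT_FRAME
 * = 512 and MAX_INDIRECT_PAGES = DIV_ROUND_UP(256, 512) = 1.  With a 64K
 * guest PAGE_SIZE (e.g. arm64), XEN_PAGES_PER_SEGMENT = 16, so
 * SEGS_PER_INDIRECT_FRAME = 32 and MAX_INDIRECT_PAGES = DIV_ROUND_UP(256, 32)
 * = 8.
 */
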
/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
	char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk             */
	uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);
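
/*
 * DEFINE_RING_TYPES(name, req, rsp) (see xen/interface/io/ring.h) generates
 * struct name##_sring (the layout of the page actually shared with the
 * frontend) plus the struct name##_front_ring/name##_back_ring accessor
 * types, so the three invocations above provide the *_back_ring views
 * collected in union blkif_back_rings below.
 */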

union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
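
/*
 * A minimal usage sketch (illustrative only; the real negotiation lives in
 * the xenbus code, and 'ring'/'nr_grefs' are stand-in names): once the
 * frontend's "protocol" node has been parsed into blk_protocol, the backend
 * initialises the matching view of the shared page:
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		BACK_RING_INIT(&ring->blk_rings.native,
 *			       (struct blkif_sring *)ring->blk_ring,
 *			       XEN_PAGE_SIZE * nr_grefs);
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		BACK_RING_INIT(&ring->blk_rings.x86_32,
 *			       (struct blkif_x86_32_sring *)ring->blk_ring,
 *			       XEN_PAGE_SIZE * nr_grefs);
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		BACK_RING_INIT(&ring->blk_rings.x86_64,
 *			       (struct blkif_x86_64_sring *)ring->blk_ring,
 *			       XEN_PAGE_SIZE * nr_grefs);
 *		break;
 *	}
 */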

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t   handle;
	/* Non-zero -> read-only */
	unsigned char  readonly;
	/* VDISK_xxx */
	unsigned char  type;
	/* phys device that this vbd maps to. */
	u32            pdevice;
	struct block_device *bdev;
	/* Cached size parameter. */
	sector_t       size;
	unsigned int   flush_support:1;
	unsigned int   discard_secure:1;
	unsigned int   feature_gnt_persistent:1;
	unsigned int   overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE	0
/*
 * This persistent grant has been used; the flag is set when we clear
 * PERSISTENT_GNT_ACTIVE, to record that the grant was used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1
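
/*
 * Flag lifecycle, in outline (the authoritative logic lives in blkback.c):
 * a grant holds PERSISTENT_GNT_ACTIVE while a request is using it; on
 * completion, ACTIVE is cleared and PERSISTENT_GNT_WAS_ACTIVE is set, so
 * the LRU purge can skip grants used since the last scan and reclaim only
 * cold ones.
 */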

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE		32

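/*
 * One persistently-mapped grant: 'node' links the entry into
 * xen_blkif_ring->persistent_gnts (an rb-tree keyed by 'gnt'), and
 * 'remove_node' queues it on persistent_purge_list when the LRU purge
 * decides to unmap it.
 */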
struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node node;
	struct list_head remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int        irq;
	union blkif_back_rings blk_rings;
	void                *blk_ring;
	/* Private fields. */
	spinlock_t          blk_ring_lock;

	wait_queue_head_t   wq;
	atomic_t            inflight;
	bool                active;
	/* One thread per blkif ring. */
	struct task_struct  *xenblkd;
	unsigned int        waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head    pending_free;
	/* And its spinlock. */
	spinlock_t          pending_free_lock;
	wait_queue_head_t   pending_free_wq;

	/* Tree to store persistent grants. */
	spinlock_t          pers_gnts_lock;
	struct rb_root      persistent_gnts;
	unsigned int        persistent_gnt_c;
	atomic_t            persistent_gnt_in_use;
	unsigned long       next_lru;

	/* Statistics. */
	unsigned long       st_print;
	unsigned long long  st_rd_req;
	unsigned long long  st_wr_req;
	unsigned long long  st_oo_req;
	unsigned long long  st_f_req;
	unsigned long long  st_ds_req;
	unsigned long long  st_rd_sect;
	unsigned long long  st_wr_sect;

	/* Used by the kworker that offloads work from the persistent purge. */
	struct list_head    persistent_purge_list;
	struct work_struct  persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	spinlock_t          free_pages_lock;
	int                 free_pages_num;
	struct list_head    free_pages;

	struct work_struct  free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t   shutdown_wq;
	struct xen_blkif    *blkif;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t           domid;
	unsigned int      handle;
	/* Comms information. */
	enum blkif_protocol blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd    vbd;
	/* Back pointer to the backend_info. */
	struct backend_info *be;
	atomic_t          refcnt;
	/* for barrier (drain) requests */
	struct completion drain_complete;
	atomic_t          drain;

	struct work_struct free_work;
	unsigned int      nr_ring_pages;
	/* All rings for this device. */
	struct xen_blkif_ring *rings;
	unsigned int      nr_rings;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring   *ring;
	u64                     id;
	int                     nr_segs;
	atomic_t                pendcnt;
	unsigned short          operation;
	int                     status;
	struct list_head        free_list;
	struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
	struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page             *unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};

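/*
 * Size of the vbd in 512-byte sectors: the partition size when the backing
 * device is a partition, otherwise the capacity of the whole disk.
 */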
#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			 get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
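
/*
 * Refcounting sketch: every user that may still touch the blkif holds a
 * reference, e.g.
 *
 *	xen_blkif_get(blkif);
 *	... hand blkif to a kthread or an in-flight request ...
 *	xen_blkif_put(blkif);
 *
 * The final put schedules free_work rather than freeing inline, so teardown
 * can sleep even when the last reference is dropped from atomic context.
 */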

struct phys_req {
	unsigned short       dev;
	blkif_sector_t       nr_sects;
	struct block_device *bdev;
	blkif_sector_t       sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

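	/*
	 * 'src' points into a page shared with (and writable by) the
	 * frontend.  READ_ONCE() and the barrier() below guard against
	 * double fetches: each field is copied once and then validated
	 * from the local copy, never re-read from shared memory.
	 */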
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

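	/* Same double-fetch precautions as blkif_get_x86_32_req() above. */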
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */