/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	unsigned long pfn;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
	int err;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)");
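
/* Number of request slots that fit on the single shared ring page. */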
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	struct list_head grants;
	struct list_head indirect_pages;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int flush_op;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int max_indirect_segments;
	int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */
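
/*
 * Number of segment descriptors that fit in a single indirect grant page,
 * and the number of indirect pages needed to map a given segment count.
 */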
#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_info *info);
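
/*
 * The shadow free list is threaded through the id fields of the unused
 * shadow entries; shadow_free holds the index of the first free entry.
 */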
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_info *info,
			      unsigned long id)
{
	if (info->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (info->shadow[id].request == NULL)
		return -EINVAL;
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
	return 0;
}
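
/*
 * Pre-populate info->grants with @num grant tracking structures.  When
 * persistent grants are used, each entry is also given a backing page up
 * front; otherwise the page is supplied later by the request itself.
 */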
static int fill_grant_buffer(struct blkfront_info *info, int num)
{
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->pfn = page_to_pfn(granted_page);
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(pfn_to_page(gnt_list_entry->pfn));
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}
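
/*
 * Take a grant from info->grants.  An entry that still carries a valid
 * gref is a cached persistent grant and is reused as-is; otherwise a fresh
 * grant reference is claimed from @gref_head and the backend is granted
 * access to the page at @pfn.
 */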
static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long pfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry;
	unsigned long buffer_mfn;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF) {
		info->persistent_gnts_c--;
		return gnt_list_entry;
	}

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		BUG_ON(!pfn);
		gnt_list_entry->pfn = pfn;
	}
	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
					info->xbdev->otherend_id,
					buffer_mfn, 0);
	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}
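
/*
 * Reserve the minor-number range [minor, minor + nr) in the global bitmap,
 * growing the bitmap on demand, so that no two vbds claim the same minors.
 */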
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment_aligned *segments = NULL;

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	grant_ref_t gref_head;
	struct grant *gnt_list_entry = NULL;
	struct scatterlist *sg;
	int nseg, max_grefs;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	max_grefs = req->nr_phys_segments;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &gref_head) < 0) {
			gnttab_request_free_callback(
				&info->callback,
				blkif_restart_queue_callback,
				info,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		ring_req->u.discard.id = id;
		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		BUG_ON(info->max_indirect_segments == 0 &&
		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
		BUG_ON(info->max_indirect_segments &&
		       req->nr_phys_segments > info->max_indirect_segments);
		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
		ring_req->u.rw.id = id;
		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			/*
			 * The indirect operation can only be a BLKIF_OP_READ or
			 * BLKIF_OP_WRITE
			 */
			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
			ring_req->operation = BLKIF_OP_INDIRECT;
			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.indirect.handle = info->handle;
			ring_req->u.indirect.nr_segments = nseg;
		} else {
			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.rw.handle = info->handle;
			ring_req->operation = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
				/*
				 * Ideally we can do an unordered flush-to-disk.  In case the
				 * backend only supports barriers, use that.  A barrier request
				 * is a superset of FUA, so we can implement it the same
				 * way.  (It's also a FLUSH+FUA, since it is
				 * guaranteed ordered WRT previous writes.)
				 */
				ring_req->operation = info->flush_op;
			}
			ring_req->u.rw.nr_segments = nseg;
		}
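		/*
		 * Grab a grant for each data segment; every
		 * SEGS_PER_INDIRECT_FRAME segments a new indirect descriptor
		 * page is set up, and for writes with persistent grants the
		 * bio data is copied into the granted page.
		 */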
		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;

			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
				unsigned long uninitialized_var(pfn);

				if (segments)
					kunmap_atomic(segments);

				n = i / SEGS_PER_INDIRECT_FRAME;
				if (!info->feature_persistent) {
					struct page *indirect_page;

					/* Fetch a pre-allocated page to use for indirect grefs */
					BUG_ON(list_empty(&info->indirect_pages));
					indirect_page = list_first_entry(&info->indirect_pages,
									 struct page, lru);
					list_del(&indirect_page->lru);
					pfn = page_to_pfn(indirect_page);
				}
				gnt_list_entry = get_grant(&gref_head, pfn, info);
				info->shadow[id].indirect_grants[n] = gnt_list_entry;
				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
			}

			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
			ref = gnt_list_entry->gref;

			info->shadow[id].grants_used[i] = gnt_list_entry;

			if (rq_data_dir(req) && info->feature_persistent) {
				char *bvec_data;
				void *shared_data;

				BUG_ON(sg->offset + sg->length > PAGE_SIZE);

				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				bvec_data = kmap_atomic(sg_page(sg));

				/*
				 * this does not wipe data stored outside the
				 * range sg->offset..sg->offset+sg->length.
				 * Therefore, blkback *could* see data from
				 * previous requests. This is OK as long as
				 * persistent grants are shared with just one
				 * domain. It may need refactoring if this
				 * changes
				 */
				memcpy(shared_data + sg->offset,
				       bvec_data + sg->offset,
				       sg->length);

				kunmap_atomic(bvec_data);
				kunmap_atomic(shared_data);
			}
			if (ring_req->operation != BLKIF_OP_INDIRECT) {
				ring_req->u.rw.seg[i] =
						(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
			} else {
				n = i % SEGS_PER_INDIRECT_FRAME;
				segments[n] =
					(struct blkif_request_segment_aligned) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
			}
		}
		if (segments)
			kunmap_atomic(segments);
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(gref_head);

	return 0;
}

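/*
 * Push any requests queued on the shared ring to the backend and send an
 * event channel notification only if the ring macros say one is needed.
 */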
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if ((req->cmd_type != REQ_TYPE_FS) ||
		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
		    !info->flush_op)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

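/*
 * Set up the request queue for @gd: sector sizes, discard support when
 * advertised, and segment limits so that a merged request always fits in
 * a single ring slot (or a single indirect request).
 */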
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}


static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
	       info->gd->disk_name,
	       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
		"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
		"flush diskcache" : "barrier or flush"),
	       info->feature_flush ? "enabled;" : "disabled;",
	       "persistent grants:",
	       info->feature_persistent ? "enabled;" : "disabled;",
	       "indirect descriptors:",
	       info->max_indirect_segments ? "enabled;" : "disabled;");
}

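/*
 * Translate an emulated IDE/SCSI device number into the disk-name offset
 * and minor number used for the corresponding xvd device.
 */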
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
		case XEN_IDE0_MAJOR:
			*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = ((*minor / 64) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_IDE1_MAJOR:
			*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK0_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK1_MAJOR:
		case XEN_SCSI_DISK2_MAJOR:
		case XEN_SCSI_DISK3_MAJOR:
		case XEN_SCSI_DISK4_MAJOR:
		case XEN_SCSI_DISK5_MAJOR:
		case XEN_SCSI_DISK6_MAJOR:
		case XEN_SCSI_DISK7_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) +
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK8_MAJOR:
		case XEN_SCSI_DISK9_MAJOR:
		case XEN_SCSI_DISK10_MAJOR:
		case XEN_SCSI_DISK11_MAJOR:
		case XEN_SCSI_DISK12_MAJOR:
		case XEN_SCSI_DISK13_MAJOR:
		case XEN_SCSI_DISK14_MAJOR:
		case XEN_SCSI_DISK15_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) +
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XENVBD_MAJOR:
			*offset = *minor / PARTS_PER_DISK;
			break;
		default:
			printk(KERN_WARNING "blkfront: your disk configuration is "
					"incorrect, please use an xvd device instead\n");
			return -ENODEV;
	}
	return 0;
}

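/*
 * Encode disk index @n as the xvd-style alphabetic suffix (a..z, aa..az, ...)
 * and return the pointer just past the last character written.
 */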
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
				"emulated IDE disks,\n\t choose an xvd device name"
				"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

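/* Restart the blk-layer queue once there is room on the shared ring again. */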
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

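/*
 * Tear down the device: release cached grants and per-request shadow state,
 * then free the shared ring and its event channel.  @suspend selects whether
 * the device is marked suspended or fully disconnected.
 */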
static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages; this only happens when indirect
	 * descriptors are used without persistent grants.
	 */
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE; i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!info->shadow[i].request)
			goto free_shadow;

		segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       info->shadow[i].req.u.indirect.nr_segments :
		       info->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = info->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

		if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = info->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;

}

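/*
 * Complete a shadow request: for persistent-grant reads, copy the data back
 * from the shared pages into the bio, then recycle the grants onto
 * info->grants (keeping any grant the backend still maps as persistent).
 */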
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1023 | static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, |
| 1024 | struct blkif_response *bret) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1025 | { |
Roger Pau Monne | d62f691 | 2012-12-07 19:00:31 +0100 | [diff] [blame] | 1026 | int i = 0; |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1027 | struct scatterlist *sg; |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1028 | char *bvec_data; |
| 1029 | void *shared_data; |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1030 | int nseg; |
| 1031 | |
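| | /* The segment count lives in a different union member for indirect requests than for plain read/write requests. */ |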
| 1032 | nseg = s->req.operation == BLKIF_OP_INDIRECT ? |
| 1033 | s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1034 | |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1035 | if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1036 | /* |
| 1037 | * Copy the data received from the backend into the bvec. |
| 1038 | * Since bv_offset can be different from 0, and bv_len different |
| 1039 | * from PAGE_SIZE, we have to keep track of the current offset, |
| 1040 | * to be sure we are copying the data from the right shared page. |
| 1041 | */ |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1042 | for_each_sg(s->sg, sg, nseg, i) { |
| 1043 | BUG_ON(sg->offset + sg->length > PAGE_SIZE); |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1044 | shared_data = kmap_atomic( |
| 1045 | pfn_to_page(s->grants_used[i]->pfn)); |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1046 | bvec_data = kmap_atomic(sg_page(sg)); |
| 1047 | memcpy(bvec_data + sg->offset, |
| 1048 | shared_data + sg->offset, |
| 1049 | sg->length); |
| 1050 | kunmap_atomic(bvec_data); |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1051 | kunmap_atomic(shared_data); |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1052 | } |
| 1053 | } |
| 1054 | /* Add the used grants to the list of free grants */ |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1055 | for (i = 0; i < nseg; i++) { |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1056 | if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { |
| 1057 | /* |
| 1058 | * If the grant is still mapped by the backend (the |
| 1059 | * backend has chosen to make this grant persistent) |
| 1060 | * we add it at the head of the list, so it will be |
| 1061 | * reused first. |
| 1062 | */ |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1063 | if (!info->feature_persistent) |
| 1064 | pr_alert_ratelimited("backend has not unmapped grant: %u\n", |
| 1065 | s->grants_used[i]->gref); |
| 1066 | list_add(&s->grants_used[i]->node, &info->grants); |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1067 | info->persistent_gnts_c++; |
| 1068 | } else { |
| 1069 | /* |
| 1070 | * If the grant is not mapped by the backend we end the |
| 1071 | * foreign access and add it to the tail of the list, |
| 1072 | * so it will not be picked again unless we run out of |
| 1073 | * persistent grants. |
| 1074 | */ |
| 1075 | gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); |
| 1076 | s->grants_used[i]->gref = GRANT_INVALID_REF; |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1077 | list_add_tail(&s->grants_used[i]->node, &info->grants); |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1078 | } |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1079 | } |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1080 | if (s->req.operation == BLKIF_OP_INDIRECT) { |
| 1081 | for (i = 0; i < INDIRECT_GREFS(nseg); i++) { |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1082 | if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1083 | if (!info->feature_persistent) |
| 1084 | pr_alert_ratelimited("backend has not unmapped grant: %u\n", |
| 1085 | s->indirect_grants[i]->gref); |
| 1086 | list_add(&s->indirect_grants[i]->node, &info->grants); |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1087 | info->persistent_gnts_c++; |
| 1088 | } else { |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1089 | struct page *indirect_page; |
| 1090 | |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1091 | gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1092 | /* |
| 1093 | * Add the used indirect page back to the list of |
| 1094 | * available pages for indirect grefs. |
| 1095 | */ |
| 1096 | indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); |
| 1097 | list_add(&indirect_page->lru, &info->indirect_pages); |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1098 | s->indirect_grants[i]->gref = GRANT_INVALID_REF; |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1099 | list_add_tail(&s->indirect_grants[i]->node, &info->grants); |
Roger Pau Monne | fbe363c | 2013-08-12 12:53:44 +0200 | [diff] [blame] | 1100 | } |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1101 | } |
| 1102 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1103 | } |
| 1104 | |
| 1105 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) |
| 1106 | { |
| 1107 | struct request *req; |
| 1108 | struct blkif_response *bret; |
| 1109 | RING_IDX i, rp; |
| 1110 | unsigned long flags; |
| 1111 | struct blkfront_info *info = (struct blkfront_info *)dev_id; |
Kiyoshi Ueda | f530f036 | 2007-12-11 17:47:36 -0500 | [diff] [blame] | 1112 | int error; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1113 | |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1114 | spin_lock_irqsave(&info->io_lock, flags); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1115 | |
| 1116 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1117 | spin_unlock_irqrestore(&info->io_lock, flags); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1118 | return IRQ_HANDLED; |
| 1119 | } |
| 1120 | |
| 1121 | again: |
| 1122 | rp = info->ring.sring->rsp_prod; |
| 1123 | rmb(); /* Ensure we see queued responses up to 'rp'. */ |
| 1124 | |
| 1125 | for (i = info->ring.rsp_cons; i != rp; i++) { |
| 1126 | unsigned long id; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1127 | |
| 1128 | bret = RING_GET_RESPONSE(&info->ring, i); |
| 1129 | id = bret->id; |
Konrad Rzeszutek Wilk | 6878c32 | 2012-05-25 17:34:51 -0400 | [diff] [blame] | 1130 | /* |
| 1131 | * The backend has messed up and given us an id that we would |
| 1132 | * never have given to it (we stamp it up to BLK_RING_SIZE; |
| 1133 | * look in get_id_from_freelist). |
| 1134 | */ |
| 1135 | if (id >= BLK_RING_SIZE) { |
| 1136 | WARN(1, "%s: response to %s has incorrect id (%ld)\n", |
| 1137 | info->gd->disk_name, op_name(bret->operation), id); |
| 1138 | /* We can't safely get the 'struct request' as |
| 1139 | * the id is busted. */ |
| 1140 | continue; |
| 1141 | } |
Jeremy Fitzhardinge | a945b98 | 2010-11-01 17:03:14 -0400 | [diff] [blame] | 1142 | req = info->shadow[id].request; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1143 | |
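| | /* Discard responses carry no data segments, so there is nothing for blkif_completion() to unmap or copy. */ |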
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1144 | if (bret->operation != BLKIF_OP_DISCARD) |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1145 | blkif_completion(&info->shadow[id], info, bret); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1146 | |
Konrad Rzeszutek Wilk | 6878c32 | 2012-05-25 17:34:51 -0400 | [diff] [blame] | 1147 | if (add_id_to_freelist(info, id)) { |
| 1148 | WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n", |
| 1149 | info->gd->disk_name, op_name(bret->operation), id); |
| 1150 | continue; |
| 1151 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1152 | |
Kiyoshi Ueda | f530f036 | 2007-12-11 17:47:36 -0500 | [diff] [blame] | 1153 | error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1154 | switch (bret->operation) { |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1155 | case BLKIF_OP_DISCARD: |
| 1156 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
| 1157 | struct request_queue *rq = info->rq; |
Konrad Rzeszutek Wilk | 6878c32 | 2012-05-25 17:34:51 -0400 | [diff] [blame] | 1158 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
| 1159 | info->gd->disk_name, op_name(bret->operation)); |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1160 | error = -EOPNOTSUPP; |
| 1161 | info->feature_discard = 0; |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1162 | info->feature_secdiscard = 0; |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1163 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1164 | queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1165 | } |
| 1166 | __blk_end_request_all(req, error); |
| 1167 | break; |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1168 | case BLKIF_OP_FLUSH_DISKCACHE: |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1169 | case BLKIF_OP_WRITE_BARRIER: |
| 1170 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
Konrad Rzeszutek Wilk | 6878c32 | 2012-05-25 17:34:51 -0400 | [diff] [blame] | 1171 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
| 1172 | info->gd->disk_name, op_name(bret->operation)); |
Kiyoshi Ueda | f530f036 | 2007-12-11 17:47:36 -0500 | [diff] [blame] | 1173 | error = -EOPNOTSUPP; |
Jeremy Fitzhardinge | dcb8bae | 2010-11-02 11:55:58 -0400 | [diff] [blame] | 1174 | } |
| 1175 | if (unlikely(bret->status == BLKIF_RSP_ERROR && |
Konrad Rzeszutek Wilk | 97e3683 | 2011-10-12 12:12:36 -0400 | [diff] [blame] | 1176 | info->shadow[id].req.u.rw.nr_segments == 0)) { |
Konrad Rzeszutek Wilk | 6878c32 | 2012-05-25 17:34:51 -0400 | [diff] [blame] | 1177 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", |
| 1178 | info->gd->disk_name, op_name(bret->operation)); |
Jeremy Fitzhardinge | dcb8bae | 2010-11-02 11:55:58 -0400 | [diff] [blame] | 1179 | error = -EOPNOTSUPP; |
| 1180 | } |
| 1181 | if (unlikely(error)) { |
| 1182 | if (error == -EOPNOTSUPP) |
| 1183 | error = 0; |
Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1184 | info->feature_flush = 0; |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1185 | info->flush_op = 0; |
Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1186 | xlvbd_flush(info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1187 | } |
| 1188 | /* fall through */ |
| 1189 | case BLKIF_OP_READ: |
| 1190 | case BLKIF_OP_WRITE: |
| 1191 | if (unlikely(bret->status != BLKIF_RSP_OKAY)) |
| 1192 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " |
| 1193 | "request: %x\n", bret->status); |
| 1194 | |
Tejun Heo | 40cbbb7 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 1195 | __blk_end_request_all(req, error); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1196 | break; |
| 1197 | default: |
| 1198 | BUG(); |
| 1199 | } |
| 1200 | } |
| 1201 | |
| 1202 | info->ring.rsp_cons = i; |
| 1203 | |
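| | /* If requests are still outstanding, check once more for responses that raced with us; otherwise just re-arm rsp_event for the next response. */ |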
| 1204 | if (i != info->ring.req_prod_pvt) { |
| 1205 | int more_to_do; |
| 1206 | RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); |
| 1207 | if (more_to_do) |
| 1208 | goto again; |
| 1209 | } else |
| 1210 | info->ring.sring->rsp_event = i + 1; |
| 1211 | |
| 1212 | kick_pending_request_queues(info); |
| 1213 | |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1214 | spin_unlock_irqrestore(&info->io_lock, flags); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1215 | |
| 1216 | return IRQ_HANDLED; |
| 1217 | } |
| 1218 | |
| 1219 | |
| 1220 | static int setup_blkring(struct xenbus_device *dev, |
| 1221 | struct blkfront_info *info) |
| 1222 | { |
| 1223 | struct blkif_sring *sring; |
| 1224 | int err; |
| 1225 | |
| 1226 | info->ring_ref = GRANT_INVALID_REF; |
| 1227 | |
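| | /* GFP_NOIO keeps this allocation from recursing into block I/O (we may be on the resume path), and __GFP_HIGH lets it dip into reserves. */ |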
Ian Campbell | a144ff0 | 2008-06-17 10:47:08 +0200 | [diff] [blame] | 1228 | sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1229 | if (!sring) { |
| 1230 | xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); |
| 1231 | return -ENOMEM; |
| 1232 | } |
| 1233 | SHARED_RING_INIT(sring); |
| 1234 | FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); |
| 1235 | |
| 1236 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); |
| 1237 | if (err < 0) { |
| 1238 | free_page((unsigned long)sring); |
| 1239 | info->ring.sring = NULL; |
| 1240 | goto fail; |
| 1241 | } |
| 1242 | info->ring_ref = err; |
| 1243 | |
| 1244 | err = xenbus_alloc_evtchn(dev, &info->evtchn); |
| 1245 | if (err) |
| 1246 | goto fail; |
| 1247 | |
Theodore Ts'o | 89c30f1 | 2012-07-17 13:46:19 -0400 | [diff] [blame] | 1248 | err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0, |
| 1249 | "blkif", info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1250 | if (err <= 0) { |
| 1251 | xenbus_dev_fatal(dev, err, |
| 1252 | "bind_evtchn_to_irqhandler failed"); |
| 1253 | goto fail; |
| 1254 | } |
| 1255 | info->irq = err; |
| 1256 | |
| 1257 | return 0; |
| 1258 | fail: |
| 1259 | blkif_free(info, 0); |
| 1260 | return err; |
| 1261 | } |
| 1262 | |
| 1263 | |
| 1264 | /* Common code used when first setting up, and when resuming. */ |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 1265 | static int talk_to_blkback(struct xenbus_device *dev, |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1266 | struct blkfront_info *info) |
| 1267 | { |
| 1268 | const char *message = NULL; |
| 1269 | struct xenbus_transaction xbt; |
| 1270 | int err; |
| 1271 | |
| 1272 | /* Create shared ring, alloc event channel. */ |
| 1273 | err = setup_blkring(dev, info); |
| 1274 | if (err) |
| 1275 | goto out; |
| 1276 | |
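| | /* All xenstore writes below happen inside one transaction; on -EAGAIN the whole transaction is retried from here. */ |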
| 1277 | again: |
| 1278 | err = xenbus_transaction_start(&xbt); |
| 1279 | if (err) { |
| 1280 | xenbus_dev_fatal(dev, err, "starting transaction"); |
| 1281 | goto destroy_blkring; |
| 1282 | } |
| 1283 | |
| 1284 | err = xenbus_printf(xbt, dev->nodename, |
| 1285 | "ring-ref", "%u", info->ring_ref); |
| 1286 | if (err) { |
| 1287 | message = "writing ring-ref"; |
| 1288 | goto abort_transaction; |
| 1289 | } |
| 1290 | err = xenbus_printf(xbt, dev->nodename, |
| 1291 | "event-channel", "%u", info->evtchn); |
| 1292 | if (err) { |
| 1293 | message = "writing event-channel"; |
| 1294 | goto abort_transaction; |
| 1295 | } |
Markus Armbruster | 3e33423 | 2008-04-02 10:54:02 -0700 | [diff] [blame] | 1296 | err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", |
| 1297 | XEN_IO_PROTO_ABI_NATIVE); |
| 1298 | if (err) { |
| 1299 | message = "writing protocol"; |
| 1300 | goto abort_transaction; |
| 1301 | } |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1302 | err = xenbus_printf(xbt, dev->nodename, |
Roger Pau Monne | cb5bd4d | 2012-11-02 16:43:04 +0100 | [diff] [blame] | 1303 | "feature-persistent", "%u", 1); |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1304 | if (err) |
| 1305 | dev_warn(&dev->dev, |
| 1306 | "writing persistent grants feature to xenbus"); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1307 | |
| 1308 | err = xenbus_transaction_end(xbt, 0); |
| 1309 | if (err) { |
| 1310 | if (err == -EAGAIN) |
| 1311 | goto again; |
| 1312 | xenbus_dev_fatal(dev, err, "completing transaction"); |
| 1313 | goto destroy_blkring; |
| 1314 | } |
| 1315 | |
| 1316 | xenbus_switch_state(dev, XenbusStateInitialised); |
| 1317 | |
| 1318 | return 0; |
| 1319 | |
| 1320 | abort_transaction: |
| 1321 | xenbus_transaction_end(xbt, 1); |
| 1322 | if (message) |
| 1323 | xenbus_dev_fatal(dev, err, "%s", message); |
| 1324 | destroy_blkring: |
| 1325 | blkif_free(info, 0); |
| 1326 | out: |
| 1327 | return err; |
| 1328 | } |
| 1329 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1330 | /** |
| 1331 | * Entry point to this code when a new device is created. Allocate the basic |
| 1332 | * structures and the ring buffer for communication with the backend, and |
| 1333 | * inform the backend of the appropriate details for those. Switch to |
| 1334 | * Initialised state. |
| 1335 | */ |
| 1336 | static int blkfront_probe(struct xenbus_device *dev, |
| 1337 | const struct xenbus_device_id *id) |
| 1338 | { |
| 1339 | int err, vdevice, i; |
| 1340 | struct blkfront_info *info; |
| 1341 | |
| 1342 | /* FIXME: Use dynamic device id if this is not set. */ |
| 1343 | err = xenbus_scanf(XBT_NIL, dev->nodename, |
| 1344 | "virtual-device", "%i", &vdevice); |
| 1345 | if (err != 1) { |
Chris Lalancette | 9246b5f | 2008-09-17 14:30:32 -0700 | [diff] [blame] | 1346 | /* go looking in the extended area instead */ |
| 1347 | err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", |
| 1348 | "%i", &vdevice); |
| 1349 | if (err != 1) { |
| 1350 | xenbus_dev_fatal(dev, err, "reading virtual-device"); |
| 1351 | return err; |
| 1352 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1353 | } |
| 1354 | |
Stefano Stabellini | b98a409 | 2010-07-29 14:53:16 +0100 | [diff] [blame] | 1355 | if (xen_hvm_domain()) { |
| 1356 | char *type; |
| 1357 | int len; |
| 1358 | /* no unplug has been done: do not hook devices != xen vbds */ |
Ian Campbell | 1dc7ce9 | 2010-08-23 11:59:29 +0100 | [diff] [blame] | 1359 | if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { |
Stefano Stabellini | b98a409 | 2010-07-29 14:53:16 +0100 | [diff] [blame] | 1360 | int major; |
Stefano Stabellini | c1c5413 | 2010-05-14 12:44:30 +0100 | [diff] [blame] | 1361 | |
Stefano Stabellini | b98a409 | 2010-07-29 14:53:16 +0100 | [diff] [blame] | 1362 | if (!VDEV_IS_EXTENDED(vdevice)) |
| 1363 | major = BLKIF_MAJOR(vdevice); |
| 1364 | else |
| 1365 | major = XENVBD_MAJOR; |
Stefano Stabellini | c1c5413 | 2010-05-14 12:44:30 +0100 | [diff] [blame] | 1366 | |
Stefano Stabellini | b98a409 | 2010-07-29 14:53:16 +0100 | [diff] [blame] | 1367 | if (major != XENVBD_MAJOR) { |
| 1368 | printk(KERN_INFO |
| 1369 | "%s: HVM does not support vbd %d as xen block device\n", |
| 1370 | __FUNCTION__, vdevice); |
| 1371 | return -ENODEV; |
| 1372 | } |
| 1373 | } |
| 1374 | /* do not create a PV cdrom device if we are an HVM guest */ |
| 1375 | type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); |
| 1376 | if (IS_ERR(type)) |
| 1377 | return -ENODEV; |
| 1378 | if (strncmp(type, "cdrom", 5) == 0) { |
| 1379 | kfree(type); |
Stefano Stabellini | c1c5413 | 2010-05-14 12:44:30 +0100 | [diff] [blame] | 1380 | return -ENODEV; |
| 1381 | } |
Stefano Stabellini | b98a409 | 2010-07-29 14:53:16 +0100 | [diff] [blame] | 1382 | kfree(type); |
Stefano Stabellini | c1c5413 | 2010-05-14 12:44:30 +0100 | [diff] [blame] | 1383 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1384 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
| 1385 | if (!info) { |
| 1386 | xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); |
| 1387 | return -ENOMEM; |
| 1388 | } |
| 1389 | |
Daniel Stodden | b70f5fa | 2010-04-30 22:01:19 +0000 | [diff] [blame] | 1390 | mutex_init(&info->mutex); |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1391 | spin_lock_init(&info->io_lock); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1392 | info->xbdev = dev; |
| 1393 | info->vdevice = vdevice; |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1394 | INIT_LIST_HEAD(&info->grants); |
| 1395 | INIT_LIST_HEAD(&info->indirect_pages); |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1396 | info->persistent_gnts_c = 0; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1397 | info->connected = BLKIF_STATE_DISCONNECTED; |
| 1398 | INIT_WORK(&info->work, blkif_restart_queue); |
| 1399 | |
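| | /* Chain the shadow entries into a free list through req.u.rw.id; 0x0fffffff marks the end of the list. */ |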
| 1400 | for (i = 0; i < BLK_RING_SIZE; i++) |
Konrad Rzeszutek Wilk | 97e3683 | 2011-10-12 12:12:36 -0400 | [diff] [blame] | 1401 | info->shadow[i].req.u.rw.id = i+1; |
| 1402 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1403 | |
| 1404 | /* Front end dir is a number, which is used as the id. */ |
| 1405 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); |
Greg Kroah-Hartman | a1b4b12 | 2009-04-30 14:43:31 -0700 | [diff] [blame] | 1406 | dev_set_drvdata(&dev->dev, info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1407 | |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 1408 | err = talk_to_blkback(dev, info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1409 | if (err) { |
| 1410 | kfree(info); |
Greg Kroah-Hartman | a1b4b12 | 2009-04-30 14:43:31 -0700 | [diff] [blame] | 1411 | dev_set_drvdata(&dev->dev, NULL); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1412 | return err; |
| 1413 | } |
| 1414 | |
| 1415 | return 0; |
| 1416 | } |
| 1417 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1418 | static void split_bio_end(struct bio *bio, int error) |
| 1419 | { |
| 1420 | struct split_bio *split_bio = bio->bi_private; |
| 1421 | |
| 1422 | if (error) |
| 1423 | split_bio->err = error; |
| 1424 | |
| 1425 | if (atomic_dec_and_test(&split_bio->pending)) { |
| 1426 | split_bio->bio->bi_phys_segments = 0; |
| 1427 | bio_endio(split_bio->bio, split_bio->err); |
| 1428 | kfree(split_bio); |
| 1429 | } |
| 1430 | bio_put(bio); |
| 1431 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1432 | |
| 1433 | static int blkif_recover(struct blkfront_info *info) |
| 1434 | { |
| 1435 | int i; |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1436 | struct request *req, *n; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1437 | struct blk_shadow *copy; |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1438 | int rc; |
| 1439 | struct bio *bio, *cloned_bio; |
| 1440 | struct bio_list bio_list, merge_bio; |
| 1441 | unsigned int segs, offset; |
| 1442 | int pending, size; |
| 1443 | struct split_bio *split_bio; |
| 1444 | struct list_head requests; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1445 | |
| 1446 | /* Stage 1: Make a safe copy of the shadow state. */ |
Mihnea Dobrescu-Balaur | 29d0b21 | 2013-03-11 13:23:36 +0200 | [diff] [blame] | 1447 | copy = kmemdup(info->shadow, sizeof(info->shadow), |
Ian Campbell | a144ff0 | 2008-06-17 10:47:08 +0200 | [diff] [blame] | 1448 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1449 | if (!copy) |
| 1450 | return -ENOMEM; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1451 | |
| 1452 | /* Stage 2: Set up free list. */ |
| 1453 | memset(&info->shadow, 0, sizeof(info->shadow)); |
| 1454 | for (i = 0; i < BLK_RING_SIZE; i++) |
Konrad Rzeszutek Wilk | 97e3683 | 2011-10-12 12:12:36 -0400 | [diff] [blame] | 1455 | info->shadow[i].req.u.rw.id = i+1; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1456 | info->shadow_free = info->ring.req_prod_pvt; |
Konrad Rzeszutek Wilk | 97e3683 | 2011-10-12 12:12:36 -0400 | [diff] [blame] | 1457 | info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1458 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1459 | rc = blkfront_setup_indirect(info); |
| 1460 | if (rc) { |
| 1461 | kfree(copy); |
| 1462 | return rc; |
| 1463 | } |
| 1464 | |
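| | /* The reconnected backend may support fewer segments per request than before, so cap the queue limit and re-queue (splitting if needed) everything that was in flight. */ |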
| 1465 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; |
| 1466 | blk_queue_max_segments(info->rq, segs); |
| 1467 | bio_list_init(&bio_list); |
| 1468 | INIT_LIST_HEAD(&requests); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1469 | for (i = 0; i < BLK_RING_SIZE; i++) { |
| 1470 | /* Not in use? */ |
Jeremy Fitzhardinge | a945b98 | 2010-11-01 17:03:14 -0400 | [diff] [blame] | 1471 | if (!copy[i].request) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1472 | continue; |
| 1473 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1474 | /* |
| 1475 | * Get the bios in the request so we can re-queue them. |
| 1476 | */ |
| 1477 | if (copy[i].request->cmd_flags & |
| 1478 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { |
| 1479 | /* |
| 1480 | * Flush operations don't contain bios, so |
| 1481 | * we need to requeue the whole request |
| 1482 | */ |
| 1483 | list_add(©[i].request->queuelist, &requests); |
| 1484 | continue; |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1485 | } |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1486 | merge_bio.head = copy[i].request->bio; |
| 1487 | merge_bio.tail = copy[i].request->biotail; |
| 1488 | bio_list_merge(&bio_list, &merge_bio); |
| 1489 | copy[i].request->bio = NULL; |
| 1490 | blk_put_request(copy[i].request); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1491 | } |
| 1492 | |
| 1493 | kfree(copy); |
| 1494 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1495 | /* |
| 1496 | * Empty the queue; this is important because we might have |
| 1497 | * requests in the queue with more segments than we |
| 1498 | * can handle now. |
| 1499 | */ |
| 1500 | spin_lock_irq(&info->io_lock); |
| 1501 | while ((req = blk_fetch_request(info->rq)) != NULL) { |
| 1502 | if (req->cmd_flags & |
| 1503 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { |
| 1504 | list_add(&req->queuelist, &requests); |
| 1505 | continue; |
| 1506 | } |
| 1507 | merge_bio.head = req->bio; |
| 1508 | merge_bio.tail = req->biotail; |
| 1509 | bio_list_merge(&bio_list, &merge_bio); |
| 1510 | req->bio = NULL; |
| 1511 | if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) |
| 1512 | pr_alert("diskcache flush request found!\n"); |
| 1513 | __blk_put_request(info->rq, req); |
| 1514 | } |
| 1515 | spin_unlock_irq(&info->io_lock); |
| 1516 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1517 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
| 1518 | |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1519 | spin_lock_irq(&info->io_lock); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1520 | |
| 1521 | /* Now safe for us to use the shared ring */ |
| 1522 | info->connected = BLKIF_STATE_CONNECTED; |
| 1523 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1524 | /* Kick any other new requests queued since we resumed */ |
| 1525 | kick_pending_request_queues(info); |
| 1526 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1527 | list_for_each_entry_safe(req, n, &requests, queuelist) { |
| 1528 | /* Requeue pending requests (flush or discard) */ |
| 1529 | list_del_init(&req->queuelist); |
| 1530 | BUG_ON(req->nr_phys_segments > segs); |
| 1531 | blk_requeue_request(info->rq, req); |
| 1532 | } |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1533 | spin_unlock_irq(&info->io_lock); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1534 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1535 | while ((bio = bio_list_pop(&bio_list)) != NULL) { |
| 1536 | /* Traverse the list of pending bios and re-queue them */ |
| 1537 | if (bio_segments(bio) > segs) { |
| 1538 | /* |
| 1539 | * This bio has more segments than we can |
| 1540 | * handle, so we have to split it. |
| 1541 | */ |
| 1542 | pending = (bio_segments(bio) + segs - 1) / segs; |
| 1543 | split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); |
| 1544 | BUG_ON(split_bio == NULL); |
| 1545 | atomic_set(&split_bio->pending, pending); |
| 1546 | split_bio->bio = bio; |
| 1547 | for (i = 0; i < pending; i++) { |
| 1548 | offset = (i * segs * PAGE_SIZE) >> 9; |
| 1549 | size = min((unsigned int)(segs * PAGE_SIZE) >> 9, |
| 1550 | (unsigned int)(bio->bi_size >> 9) - offset); |
| 1551 | cloned_bio = bio_clone(bio, GFP_NOIO); |
| 1552 | BUG_ON(cloned_bio == NULL); |
Kent Overstreet | 6678d83 | 2013-08-07 11:14:32 -0700 | [diff] [blame] | 1553 | bio_trim(cloned_bio, offset, size); |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1554 | cloned_bio->bi_private = split_bio; |
| 1555 | cloned_bio->bi_end_io = split_bio_end; |
| 1556 | submit_bio(cloned_bio->bi_rw, cloned_bio); |
| 1557 | } |
| 1558 | /* |
| 1559 | * Now we have to wait for all those smaller bios to |
| 1560 | * end, so we can also end the "parent" bio. |
| 1561 | */ |
| 1562 | continue; |
| 1563 | } |
| 1564 | /* We don't need to split this bio */ |
| 1565 | submit_bio(bio->bi_rw, bio); |
| 1566 | } |
| 1567 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1568 | return 0; |
| 1569 | } |
| 1570 | |
| 1571 | /** |
| 1572 | * We are reconnecting to the backend, due to a suspend/resume, or a backend |
| 1573 | * driver restart. We tear down our blkif structure and recreate it, but |
| 1574 | * leave the device-layer structures intact so that this is transparent to the |
| 1575 | * rest of the kernel. |
| 1576 | */ |
| 1577 | static int blkfront_resume(struct xenbus_device *dev) |
| 1578 | { |
Greg Kroah-Hartman | a1b4b12 | 2009-04-30 14:43:31 -0700 | [diff] [blame] | 1579 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1580 | int err; |
| 1581 | |
| 1582 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); |
| 1583 | |
| 1584 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
| 1585 | |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 1586 | err = talk_to_blkback(dev, info); |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1587 | |
| 1588 | /* |
| 1589 | * We have to wait for the backend to switch to |
| 1590 | * connected state, since we want to read which |
| 1591 | * features it supports. |
| 1592 | */ |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1593 | |
| 1594 | return err; |
| 1595 | } |
| 1596 | |
Daniel Stodden | b70f5fa | 2010-04-30 22:01:19 +0000 | [diff] [blame] | 1597 | static void |
| 1598 | blkfront_closing(struct blkfront_info *info) |
| 1599 | { |
| 1600 | struct xenbus_device *xbdev = info->xbdev; |
| 1601 | struct block_device *bdev = NULL; |
| 1602 | |
| 1603 | mutex_lock(&info->mutex); |
| 1604 | |
| 1605 | if (xbdev->state == XenbusStateClosing) { |
| 1606 | mutex_unlock(&info->mutex); |
| 1607 | return; |
| 1608 | } |
| 1609 | |
| 1610 | if (info->gd) |
| 1611 | bdev = bdget_disk(info->gd, 0); |
| 1612 | |
| 1613 | mutex_unlock(&info->mutex); |
| 1614 | |
| 1615 | if (!bdev) { |
| 1616 | xenbus_frontend_closed(xbdev); |
| 1617 | return; |
| 1618 | } |
| 1619 | |
| 1620 | mutex_lock(&bdev->bd_mutex); |
| 1621 | |
Daniel Stodden | 7b32d10 | 2010-04-30 22:01:23 +0000 | [diff] [blame] | 1622 | if (bdev->bd_openers) { |
Daniel Stodden | b70f5fa | 2010-04-30 22:01:19 +0000 | [diff] [blame] | 1623 | xenbus_dev_error(xbdev, -EBUSY, |
| 1624 | "Device in use; refusing to close"); |
| 1625 | xenbus_switch_state(xbdev, XenbusStateClosing); |
| 1626 | } else { |
| 1627 | xlvbd_release_gendisk(info); |
| 1628 | xenbus_frontend_closed(xbdev); |
| 1629 | } |
| 1630 | |
| 1631 | mutex_unlock(&bdev->bd_mutex); |
| 1632 | bdput(bdev); |
| 1633 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1634 | |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1635 | static void blkfront_setup_discard(struct blkfront_info *info) |
| 1636 | { |
| 1637 | int err; |
| 1638 | char *type; |
| 1639 | unsigned int discard_granularity; |
| 1640 | unsigned int discard_alignment; |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1641 | unsigned int discard_secure; |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1642 | |
| 1643 | type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); |
| 1644 | if (IS_ERR(type)) |
| 1645 | return; |
| 1646 | |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1647 | info->feature_secdiscard = 0; |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1648 | if (strncmp(type, "phy", 3) == 0) { |
| 1649 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1650 | "discard-granularity", "%u", &discard_granularity, |
| 1651 | "discard-alignment", "%u", &discard_alignment, |
| 1652 | NULL); |
| 1653 | if (!err) { |
| 1654 | info->feature_discard = 1; |
| 1655 | info->discard_granularity = discard_granularity; |
| 1656 | info->discard_alignment = discard_alignment; |
| 1657 | } |
Konrad Rzeszutek Wilk | 5ea4298 | 2011-10-12 16:23:30 -0400 | [diff] [blame] | 1658 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1659 | "discard-secure", "%d", &discard_secure, |
| 1660 | NULL); |
| 1661 | if (!err) |
| 1662 | info->feature_secdiscard = discard_secure; |
| 1663 | |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1664 | } else if (strncmp(type, "file", 4) == 0) |
| 1665 | info->feature_discard = 1; |
| 1666 | |
| 1667 | kfree(type); |
| 1668 | } |
| 1669 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1670 | static int blkfront_setup_indirect(struct blkfront_info *info) |
| 1671 | { |
| 1672 | unsigned int indirect_segments, segs; |
| 1673 | int err, i; |
| 1674 | |
| 1675 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1676 | "feature-max-indirect-segments", "%u", &indirect_segments, |
| 1677 | NULL); |
| 1678 | if (err) { |
| 1679 | info->max_indirect_segments = 0; |
| 1680 | segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
| 1681 | } else { |
| 1682 | info->max_indirect_segments = min(indirect_segments, |
| 1683 | xen_blkif_max_segments); |
| 1684 | segs = info->max_indirect_segments; |
| 1685 | } |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1686 | |
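| | /* Preallocate one grant per data segment plus the grants for the indirect descriptor pages themselves, for every slot in the ring. */ |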
| 1687 | err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE); |
| 1688 | if (err) |
| 1689 | goto out_of_memory; |
| 1690 | |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1691 | if (!info->feature_persistent && info->max_indirect_segments) { |
| 1692 | /* |
| 1693 | * We are using indirect descriptors but not persistent |
| 1694 | * grants, so we need to allocate a set of pages that can be |
| 1695 | * used for mapping indirect grefs. |
| 1696 | */ |
| 1697 | int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE; |
| 1698 | |
| 1699 | BUG_ON(!list_empty(&info->indirect_pages)); |
| 1700 | for (i = 0; i < num; i++) { |
| 1701 | struct page *indirect_page = alloc_page(GFP_NOIO); |
| 1702 | if (!indirect_page) |
| 1703 | goto out_of_memory; |
| 1704 | list_add(&indirect_page->lru, &info->indirect_pages); |
| 1705 | } |
| 1706 | } |
| 1707 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1708 | for (i = 0; i < BLK_RING_SIZE; i++) { |
| 1709 | info->shadow[i].grants_used = kzalloc( |
| 1710 | sizeof(info->shadow[i].grants_used[0]) * segs, |
| 1711 | GFP_NOIO); |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1712 | info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1713 | if (info->max_indirect_segments) |
| 1714 | info->shadow[i].indirect_grants = kzalloc( |
| 1715 | sizeof(info->shadow[i].indirect_grants[0]) * |
| 1716 | INDIRECT_GREFS(segs), |
| 1717 | GFP_NOIO); |
| 1718 | if ((info->shadow[i].grants_used == NULL) || |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1719 | (info->shadow[i].sg == NULL) || |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1720 | (info->max_indirect_segments && |
| 1721 | (info->shadow[i].indirect_grants == NULL))) |
| 1722 | goto out_of_memory; |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1723 | sg_init_table(info->shadow[i].sg, segs); |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1724 | } |
| 1725 | |
| 1726 | |
| 1727 | return 0; |
| 1728 | |
| 1729 | out_of_memory: |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1730 | for (i = 0; i < BLK_RING_SIZE; i++) { |
| 1731 | kfree(info->shadow[i].grants_used); |
| 1732 | info->shadow[i].grants_used = NULL; |
Roger Pau Monne | b764915 | 2013-05-02 10:58:50 +0200 | [diff] [blame] | 1733 | kfree(info->shadow[i].sg); |
| 1734 | info->shadow[i].sg = NULL; |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1735 | kfree(info->shadow[i].indirect_grants); |
| 1736 | info->shadow[i].indirect_grants = NULL; |
| 1737 | } |
Roger Pau Monne | bfe11d6 | 2013-10-29 18:31:14 +0100 | [diff] [blame] | 1738 | if (!list_empty(&info->indirect_pages)) { |
| 1739 | struct page *indirect_page, *n; |
| 1740 | list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { |
| 1741 | list_del(&indirect_page->lru); |
| 1742 | __free_page(indirect_page); |
| 1743 | } |
| 1744 | } |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1745 | return -ENOMEM; |
| 1746 | } |
| 1747 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1748 | /* |
| 1749 | * Invoked when the backend is finally 'ready' (and has produced |
| 1750 | * the details about the physical device - #sectors, size, etc). |
| 1751 | */ |
| 1752 | static void blkfront_connect(struct blkfront_info *info) |
| 1753 | { |
| 1754 | unsigned long long sectors; |
| 1755 | unsigned long sector_size; |
Stefan Bader | 7c4d7d7 | 2013-05-13 16:28:15 +0200 | [diff] [blame] | 1756 | unsigned int physical_sector_size; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1757 | unsigned int binfo; |
| 1758 | int err; |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1759 | int barrier, flush, discard, persistent; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1760 | |
K. Y. Srinivasan | 1fa73be | 2010-03-11 13:42:26 -0800 | [diff] [blame] | 1761 | switch (info->connected) { |
| 1762 | case BLKIF_STATE_CONNECTED: |
| 1763 | /* |
| 1764 | * Potentially, the back-end may be signalling |
| 1765 | * a capacity change; update the capacity. |
| 1766 | */ |
| 1767 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, |
| 1768 | "sectors", "%Lu", §ors); |
| 1769 | if (XENBUS_EXIST_ERR(err)) |
| 1770 | return; |
| 1771 | printk(KERN_INFO "Setting capacity to %Lu\n", |
| 1772 | sectors); |
| 1773 | set_capacity(info->gd, sectors); |
K. Y. Srinivasan | 2def141 | 2010-03-18 15:00:54 -0700 | [diff] [blame] | 1774 | revalidate_disk(info->gd); |
K. Y. Srinivasan | 1fa73be | 2010-03-11 13:42:26 -0800 | [diff] [blame] | 1775 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1776 | return; |
K. Y. Srinivasan | 1fa73be | 2010-03-11 13:42:26 -0800 | [diff] [blame] | 1777 | case BLKIF_STATE_SUSPENDED: |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1778 | /* |
| 1779 | * If we are recovering from suspension, we need to wait |
| 1780 | * for the backend to announce its features before |
| 1781 | * reconnecting; at least we need to know if the backend |
| 1782 | * supports indirect descriptors, and how many. |
| 1783 | */ |
| 1784 | blkif_recover(info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1785 | return; |
| 1786 | |
Jeremy Fitzhardinge | b4dddb4 | 2010-03-11 15:10:40 -0800 | [diff] [blame] | 1787 | default: |
| 1788 | break; |
K. Y. Srinivasan | 1fa73be | 2010-03-11 13:42:26 -0800 | [diff] [blame] | 1789 | } |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1790 | |
| 1791 | dev_dbg(&info->xbdev->dev, "%s:%s.\n", |
| 1792 | __func__, info->xbdev->otherend); |
| 1793 | |
| 1794 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1795 | "sectors", "%llu", §ors, |
| 1796 | "info", "%u", &binfo, |
| 1797 | "sector-size", "%lu", §or_size, |
| 1798 | NULL); |
| 1799 | if (err) { |
| 1800 | xenbus_dev_fatal(info->xbdev, err, |
| 1801 | "reading backend fields at %s", |
| 1802 | info->xbdev->otherend); |
| 1803 | return; |
| 1804 | } |
| 1805 | |
Stefan Bader | 7c4d7d7 | 2013-05-13 16:28:15 +0200 | [diff] [blame] | 1806 | /* |
| 1807 | * physical-sector-size is a newer field, so old backends may not |
| 1808 | * provide this. Assume physical sector size to be the same as |
| 1809 | * sector_size in that case. |
| 1810 | */ |
| 1811 | err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, |
| 1812 | "physical-sector-size", "%u", &physical_sector_size); |
| 1813 | if (err != 1) |
| 1814 | physical_sector_size = sector_size; |
| 1815 | |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1816 | info->feature_flush = 0; |
| 1817 | info->flush_op = 0; |
| 1818 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1819 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
Marek Marczykowski | 4352b47 | 2011-05-03 12:04:52 -0400 | [diff] [blame] | 1820 | "feature-barrier", "%d", &barrier, |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1821 | NULL); |
Jeremy Fitzhardinge | 7901d14 | 2010-07-28 10:49:29 -0700 | [diff] [blame] | 1822 | |
| 1823 | /* |
| 1824 | * If there's no "feature-barrier" defined, then it means |
| 1825 | * we're dealing with a very old backend which writes |
Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1826 | * synchronously; nothing to do. |
Jeremy Fitzhardinge | 7901d14 | 2010-07-28 10:49:29 -0700 | [diff] [blame] | 1827 | * |
Tejun Heo | 6958f14 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1828 | * If there are barriers, then we use flush. |
Jeremy Fitzhardinge | 7901d14 | 2010-07-28 10:49:29 -0700 | [diff] [blame] | 1829 | */ |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1830 | if (!err && barrier) { |
Jeremy Fitzhardinge | be2f837 | 2010-11-02 10:38:33 -0400 | [diff] [blame] | 1831 | info->feature_flush = REQ_FLUSH | REQ_FUA; |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1832 | info->flush_op = BLKIF_OP_WRITE_BARRIER; |
| 1833 | } |
| 1834 | /* |
| 1835 | * And if there is "feature-flush-cache", use that in preference |
| 1836 | * to barriers. |
| 1837 | */ |
| 1838 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1839 | "feature-flush-cache", "%d", &flush, |
| 1840 | NULL); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1841 | |
Konrad Rzeszutek Wilk | edf6ef5 | 2011-05-03 12:01:11 -0400 | [diff] [blame] | 1842 | if (!err && flush) { |
| 1843 | info->feature_flush = REQ_FLUSH; |
| 1844 | info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; |
| 1845 | } |
Li Dongyang | ed30bf3 | 2011-09-01 18:39:09 +0800 | [diff] [blame] | 1846 | |
| 1847 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1848 | "feature-discard", "%d", &discard, |
| 1849 | NULL); |
| 1850 | |
| 1851 | if (!err && discard) |
| 1852 | blkfront_setup_discard(info); |
| 1853 | |
Roger Pau Monne | 0a8704a | 2012-10-24 18:58:45 +0200 | [diff] [blame] | 1854 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, |
| 1855 | "feature-persistent", "%u", &persistent, |
| 1856 | NULL); |
| 1857 | if (err) |
| 1858 | info->feature_persistent = 0; |
| 1859 | else |
| 1860 | info->feature_persistent = persistent; |
| 1861 | |
Roger Pau Monne | 402b27f | 2013-04-18 16:06:54 +0200 | [diff] [blame] | 1862 | err = blkfront_setup_indirect(info); |
| 1863 | if (err) { |
| 1864 | xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", |
| 1865 | info->xbdev->otherend); |
| 1866 | return; |
| 1867 | } |
| 1868 | |
Stefan Bader | 7c4d7d7 | 2013-05-13 16:28:15 +0200 | [diff] [blame] | 1869 | err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, |
| 1870 | physical_sector_size); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1871 | if (err) { |
| 1872 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", |
| 1873 | info->xbdev->otherend); |
| 1874 | return; |
| 1875 | } |
| 1876 | |
| 1877 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
| 1878 | |
| 1879 | /* Kick pending requests. */ |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1880 | spin_lock_irq(&info->io_lock); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1881 | info->connected = BLKIF_STATE_CONNECTED; |
| 1882 | kick_pending_request_queues(info); |
Steven Noonan | 3467811 | 2012-02-17 12:04:44 -0800 | [diff] [blame] | 1883 | spin_unlock_irq(&info->io_lock); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1884 | |
| 1885 | add_disk(info->gd); |
Christian Limpach | 1d78d70 | 2008-04-02 10:54:04 -0700 | [diff] [blame] | 1886 | |
| 1887 | info->is_ready = 1; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1888 | } |
| 1889 | |
| 1890 | /** |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1891 | * Callback received when the backend's state changes. |
| 1892 | */ |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 1893 | static void blkback_changed(struct xenbus_device *dev, |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1894 | enum xenbus_state backend_state) |
| 1895 | { |
Greg Kroah-Hartman | a1b4b12 | 2009-04-30 14:43:31 -0700 | [diff] [blame] | 1896 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1897 | |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 1898 | dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1899 | |
| 1900 | switch (backend_state) { |
| 1901 | case XenbusStateInitialising: |
| 1902 | case XenbusStateInitWait: |
| 1903 | case XenbusStateInitialised: |
Noboru Iwamatsu | b78c951 | 2009-10-13 17:22:29 -0400 | [diff] [blame] | 1904 | case XenbusStateReconfiguring: |
| 1905 | case XenbusStateReconfigured: |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1906 | case XenbusStateUnknown: |
| 1907 | case XenbusStateClosed: |
| 1908 | break; |
| 1909 | |
| 1910 | case XenbusStateConnected: |
| 1911 | blkfront_connect(info); |
| 1912 | break; |
| 1913 | |
| 1914 | case XenbusStateClosing: |
Daniel Stodden | b70f5fa | 2010-04-30 22:01:19 +0000 | [diff] [blame] | 1915 | blkfront_closing(info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1916 | break; |
| 1917 | } |
| 1918 | } |
| 1919 | |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1920 | static int blkfront_remove(struct xenbus_device *xbdev) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1921 | { |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1922 | struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); |
| 1923 | struct block_device *bdev = NULL; |
| 1924 | struct gendisk *disk; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1925 | |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1926 | dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1927 | |
| 1928 | blkif_free(info, 0); |
| 1929 | |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1930 | mutex_lock(&info->mutex); |
| 1931 | |
| 1932 | disk = info->gd; |
| 1933 | if (disk) |
| 1934 | bdev = bdget_disk(disk, 0); |
| 1935 | |
| 1936 | info->xbdev = NULL; |
| 1937 | mutex_unlock(&info->mutex); |
| 1938 | |
| 1939 | if (!bdev) { |
Jan Beulich | 0e34582 | 2010-08-07 18:28:55 +0200 | [diff] [blame] | 1940 | kfree(info); |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1941 | return 0; |
| 1942 | } |
| 1943 | |
| 1944 | /* |
| 1945 | * The xbdev was removed before we reached the Closed |
| 1946 | * state. See if it's safe to remove the disk. If the bdev |
| 1947 | * isn't closed yet, we let release take care of it. |
| 1948 | */ |
| 1949 | |
| 1950 | mutex_lock(&bdev->bd_mutex); |
| 1951 | info = disk->private_data; |
| 1952 | |
Daniel Stodden | d54142c | 2010-08-07 18:51:21 +0200 | [diff] [blame] | 1953 | dev_warn(disk_to_dev(disk), |
| 1954 | "%s was hot-unplugged, %d stale handles\n", |
| 1955 | xbdev->nodename, bdev->bd_openers); |
| 1956 | |
Daniel Stodden | 7b32d10 | 2010-04-30 22:01:23 +0000 | [diff] [blame] | 1957 | if (info && !bdev->bd_openers) { |
Daniel Stodden | fa1bd35 | 2010-04-30 22:01:22 +0000 | [diff] [blame] | 1958 | xlvbd_release_gendisk(info); |
| 1959 | disk->private_data = NULL; |
| 1960 | kfree(info); |
| 1961 | } |
| 1962 | |
| 1963 | mutex_unlock(&bdev->bd_mutex); |
| 1964 | bdput(bdev); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1965 | |
| 1966 | return 0; |
| 1967 | } |
| 1968 | |
Christian Limpach | 1d78d70 | 2008-04-02 10:54:04 -0700 | [diff] [blame] | 1969 | static int blkfront_is_ready(struct xenbus_device *dev) |
| 1970 | { |
Greg Kroah-Hartman | a1b4b12 | 2009-04-30 14:43:31 -0700 | [diff] [blame] | 1971 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
Christian Limpach | 1d78d70 | 2008-04-02 10:54:04 -0700 | [diff] [blame] | 1972 | |
Jan Beulich | 5d7ed20 | 2010-08-07 18:31:12 +0200 | [diff] [blame] | 1973 | return info->is_ready && info->xbdev; |
Christian Limpach | 1d78d70 | 2008-04-02 10:54:04 -0700 | [diff] [blame] | 1974 | } |
| 1975 | |
Al Viro | a63c848 | 2008-03-02 10:23:47 -0500 | [diff] [blame] | 1976 | static int blkif_open(struct block_device *bdev, fmode_t mode) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1977 | { |
Daniel Stodden | 1396174 | 2010-08-07 18:36:53 +0200 | [diff] [blame] | 1978 | struct gendisk *disk = bdev->bd_disk; |
| 1979 | struct blkfront_info *info; |
| 1980 | int err = 0; |
Arnd Bergmann | 6e9624b | 2010-08-07 18:25:34 +0200 | [diff] [blame] | 1981 | |
Arnd Bergmann | 2a48fc0 | 2010-06-02 14:28:52 +0200 | [diff] [blame] | 1982 | mutex_lock(&blkfront_mutex); |
Arnd Bergmann | 6e9624b | 2010-08-07 18:25:34 +0200 | [diff] [blame] | 1983 | |
Daniel Stodden | 1396174 | 2010-08-07 18:36:53 +0200 | [diff] [blame] | 1984 | info = disk->private_data; |
| 1985 | if (!info) { |
| 1986 | /* xbdev gone */ |
| 1987 | err = -ERESTARTSYS; |
| 1988 | goto out; |
| 1989 | } |
| 1990 | |
| 1991 | mutex_lock(&info->mutex); |
| 1992 | |
| 1993 | if (!info->gd) |
| 1994 | /* xbdev is closed */ |
| 1995 | err = -ERESTARTSYS; |
| 1996 | |
| 1997 | mutex_unlock(&info->mutex); |
| 1998 | |
Daniel Stodden | 1396174 | 2010-08-07 18:36:53 +0200 | [diff] [blame] | 1999 | out: |
Arnd Bergmann | 2a48fc0 | 2010-06-02 14:28:52 +0200 | [diff] [blame] | 2000 | mutex_unlock(&blkfront_mutex); |
Daniel Stodden | 1396174 | 2010-08-07 18:36:53 +0200 | [diff] [blame] | 2001 | return err; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2002 | } |
| 2003 | |
Al Viro | db2a144 | 2013-05-05 21:52:57 -0400 | [diff] [blame] | 2004 | static void blkif_release(struct gendisk *disk, fmode_t mode) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2005 | { |
Al Viro | a63c848 | 2008-03-02 10:23:47 -0500 | [diff] [blame] | 2006 | struct blkfront_info *info = disk->private_data; |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2007 | struct block_device *bdev; |
| 2008 | struct xenbus_device *xbdev; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2009 | |
Arnd Bergmann | 2a48fc0 | 2010-06-02 14:28:52 +0200 | [diff] [blame] | 2010 | mutex_lock(&blkfront_mutex); |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2011 | |
| 2012 | bdev = bdget_disk(disk, 0); |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2013 | |
Felipe Pena | 2f089cb | 2013-11-09 13:36:09 -0200 | [diff] [blame] | 2014 | if (!bdev) { |
| 2015 | WARN(1, "Block device %s yanked out from us!\n", disk->disk_name); |
| 2016 | goto out_mutex; |
| 2017 | } |
Daniel Stodden | acfca3c | 2010-08-07 18:47:26 +0200 | [diff] [blame] | 2018 | if (bdev->bd_openers) |
| 2019 | goto out; |
| 2020 | |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2021 | /* |
| 2022 | * Check if we have been instructed to close. We will have |
| 2023 | * deferred this request, because the bdev was still open. |
| 2024 | */ |
| 2025 | |
| 2026 | mutex_lock(&info->mutex); |
| 2027 | xbdev = info->xbdev; |
| 2028 | |
| 2029 | if (xbdev && xbdev->state == XenbusStateClosing) { |
| 2030 | /* pending switch to state closed */ |
Daniel Stodden | d54142c | 2010-08-07 18:51:21 +0200 | [diff] [blame] | 2031 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2032 | xlvbd_release_gendisk(info); |
| 2033 | xenbus_frontend_closed(info->xbdev); |
| 2034 | } |
| 2035 | |
| 2036 | mutex_unlock(&info->mutex); |
| 2037 | |
| 2038 | if (!xbdev) { |
| 2039 | /* sudden device removal */ |
Daniel Stodden | d54142c | 2010-08-07 18:51:21 +0200 | [diff] [blame] | 2040 | dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2041 | xlvbd_release_gendisk(info); |
| 2042 | disk->private_data = NULL; |
| 2043 | kfree(info); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2044 | } |
Daniel Stodden | 7fd152f | 2010-08-07 18:45:12 +0200 | [diff] [blame] | 2045 | |
Jens Axboe | a4cc14e | 2010-08-08 21:50:05 -0400 | [diff] [blame] | 2046 | out: |
Andrew Jones | dad5cf6 | 2012-02-16 13:16:25 +0100 | [diff] [blame] | 2047 | bdput(bdev); |
Felipe Pena | 2f089cb | 2013-11-09 13:36:09 -0200 | [diff] [blame] | 2048 | out_mutex: |
Arnd Bergmann | 2a48fc0 | 2010-06-02 14:28:52 +0200 | [diff] [blame] | 2049 | mutex_unlock(&blkfront_mutex); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2050 | } |
| 2051 | |
Alexey Dobriyan | 83d5cde | 2009-09-21 17:01:13 -0700 | [diff] [blame] | 2052 | static const struct block_device_operations xlvbd_block_fops = |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2053 | { |
| 2054 | .owner = THIS_MODULE, |
Al Viro | a63c848 | 2008-03-02 10:23:47 -0500 | [diff] [blame] | 2055 | .open = blkif_open, |
| 2056 | .release = blkif_release, |
Ian Campbell | 597592d | 2008-02-21 13:03:45 -0800 | [diff] [blame] | 2057 | .getgeo = blkif_getgeo, |
Arnd Bergmann | 8a6cfeb | 2010-07-08 10:18:46 +0200 | [diff] [blame] | 2058 | .ioctl = blkif_ioctl, |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2059 | }; |
| 2060 | |
| 2061 | |
Márton Németh | ec9c42e | 2010-01-10 13:39:52 +0100 | [diff] [blame] | 2062 | static const struct xenbus_device_id blkfront_ids[] = { |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2063 | { "vbd" }, |
| 2064 | { "" } |
| 2065 | }; |
| 2066 | |
Jan Beulich | 73db144 | 2011-12-22 09:08:13 +0000 | [diff] [blame] | 2067 | static DEFINE_XENBUS_DRIVER(blkfront, , |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2068 | .probe = blkfront_probe, |
| 2069 | .remove = blkfront_remove, |
| 2070 | .resume = blkfront_resume, |
Ian Campbell | 203fd61 | 2009-12-04 15:33:54 +0000 | [diff] [blame] | 2071 | .otherend_changed = blkback_changed, |
Christian Limpach | 1d78d70 | 2008-04-02 10:54:04 -0700 | [diff] [blame] | 2072 | .is_ready = blkfront_is_ready, |
Jan Beulich | 73db144 | 2011-12-22 09:08:13 +0000 | [diff] [blame] | 2073 | ); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2074 | |
| 2075 | static int __init xlblk_init(void) |
| 2076 | { |
Laszlo Ersek | 469738e | 2011-10-07 21:34:38 +0200 | [diff] [blame] | 2077 | int ret; |
| 2078 | |
Jeremy Fitzhardinge | 6e83358 | 2008-08-19 13:16:17 -0700 | [diff] [blame] | 2079 | if (!xen_domain()) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2080 | return -ENODEV; |
| 2081 | |
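| | /* On HVM, bind only if the emulated disks have been unplugged; otherwise leave them to the emulated drivers. */ |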
Igor Mammedov | e95ae5a | 2012-03-27 19:31:08 +0200 | [diff] [blame] | 2082 | if (xen_hvm_domain() && !xen_platform_pci_unplug) |
Igor Mammedov | b9136d2 | 2012-03-21 15:08:38 +0100 | [diff] [blame] | 2083 | return -ENODEV; |
| 2084 | |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2085 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { |
| 2086 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", |
| 2087 | XENVBD_MAJOR, DEV_NAME); |
| 2088 | return -ENODEV; |
| 2089 | } |
| 2090 | |
Jan Beulich | 73db144 | 2011-12-22 09:08:13 +0000 | [diff] [blame] | 2091 | ret = xenbus_register_frontend(&blkfront_driver); |
Laszlo Ersek | 469738e | 2011-10-07 21:34:38 +0200 | [diff] [blame] | 2092 | if (ret) { |
| 2093 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
| 2094 | return ret; |
| 2095 | } |
| 2096 | |
| 2097 | return 0; |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2098 | } |
| 2099 | module_init(xlblk_init); |
| 2100 | |
| 2101 | |
Jan Beulich | 5a60d0c | 2008-06-17 10:47:08 +0200 | [diff] [blame] | 2102 | static void __exit xlblk_exit(void) |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2103 | { |
Jan Beulich | 8605067 | 2012-04-05 16:04:52 +0100 | [diff] [blame] | 2104 | xenbus_unregister_driver(&blkfront_driver); |
| 2105 | unregister_blkdev(XENVBD_MAJOR, DEV_NAME); |
| 2106 | kfree(minors); |
Jeremy Fitzhardinge | 9f27ee5 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 2107 | } |
| 2108 | module_exit(xlblk_exit); |
| 2109 | |
| 2110 | MODULE_DESCRIPTION("Xen virtual block device frontend"); |
| 2111 | MODULE_LICENSE("GPL"); |
| 2112 | MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); |
Mark McLoughlin | d2f0c52 | 2008-04-02 10:54:05 -0700 | [diff] [blame] | 2113 | MODULE_ALIAS("xen:vbd"); |
Mark McLoughlin | 4f93f09b | 2008-04-02 10:54:06 -0700 | [diff] [blame] | 2114 | MODULE_ALIAS("xenblk"); |