/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set to a lower value, which might degrade performance on some
 * I/O-intensive workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
                 "Maximum number of free pages to keep in each block backend buffer");

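/*
 * Illustrative sizing only (not a tuning recommendation): with the default
 * of 1024 buffer pages and 4KB pages, each backend may keep up to 4MB of
 * unused grant pages cached between requests.
 */
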
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

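/*
 * Worked example with the defaults above (see purge_persistent_gnt()):
 * each purge pass targets (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN
 * = (1056 / 100) * 5 = 50 grants (integer arithmetic), plus however many
 * grants currently exceed the xen_blkif_max_pgrants limit.
 */
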
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

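/*
 * The helpers below manage a per-backend pool of unused grant-mappable
 * pages (blkif->free_pages). Pages are taken from the pool when a request
 * needs to map grants and are returned once the grants are unmapped,
 * avoiding a gnttab_alloc_pages()/gnttab_free_pages() round trip for every
 * request.
 */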
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
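
/*
 * foreach_grant_safe() walks an rb-tree of persistent grants in order while
 * allowing the current node to be erased: the next node is looked up before
 * the loop body runs, analogous to list_for_each_entry_safe() for lists.
 */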
Roger Pau Monne0a8704a2012-10-24 18:58:45 +0200189
190
Roger Pau Monne3f3aad52013-04-17 20:18:57 +0200191/*
Bob Liu59795702015-11-14 11:12:15 +0800192 * pers_gnts_lock must be used around all the persistent grant helpers
193 * because blkback may use multi-thread/queue for each backend.
Roger Pau Monne3f3aad52013-04-17 20:18:57 +0200194 */
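/*
 * Typical caller pattern (cf. xen_blkbk_map() below):
 *
 *	spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
 *	persistent_gnt = get_persistent_gnt(blkif, gref);
 *	spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
 */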
static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited("freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}

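/*
 * Unmap and free every grant in the rb-tree, batching the unmap operations
 * in groups of up to BLKIF_MAX_SEGMENTS_PER_REQUEST pages. Called with
 * pers_gnts_lock held, on teardown (see xen_blkbk_free_caches()).
 */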
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        BUG_ON(!spin_is_locked(&blkif->pers_gnts_lock));
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;
        unsigned long flags;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}

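/*
 * Trim the tree of persistent grants down to the configured limit. Grants
 * currently in use are never removed; a first pass skips grants that were
 * recently used (PERSISTENT_GNT_WAS_ACTIVE) and a second pass reclaims them
 * only if the first pass could not free enough. The actual unmapping is
 * deferred to persistent_purge_work.
 */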
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                goto out;
        }

        if (work_busy(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
                goto out;

        /*
         * At this point we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule); there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug("Going to purge %u persistent grants\n", num_clean);

        BUG_ON(!list_empty(&blkif->persistent_purge_list));
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge, in order to
         * reach the requested number.
         */
        if (!scan_used && !clean_used) {
                pr_debug("Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug("Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        blkif->persistent_gnt_c -= (total - num_clean);
        spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        schedule_work(&blkif->persistent_purge_work);
        pr_debug("Purged %u/%u\n", (total - num_clean), total);
        return;

out:
        spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);

        return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        if (!list_empty(&ring->pending_free)) {
                req = list_entry(ring->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        was_empty = list_empty(&ring->pending_free);
        list_add(&req->free_list, &ring->pending_free);
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        if (was_empty)
                wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
        ring->waiting_reqs = 1;
        wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req,
                blkif->persistent_gnt_c,
                xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

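/*
 * Main loop of the per-ring kernel thread: wait (at most LRU_INTERVAL ms)
 * for the frontend to post requests and for a free pending_req, dispatch
 * the outstanding ring requests, periodically purge persistent grants and
 * shrink the free page pool, and optionally print statistics.
 */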
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif_ring *ring = arg;
        struct xen_blkif *blkif = ring->blkif;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        ring->wq,
                        ring->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        ring->pending_free_wq,
                        !list_empty(&ring->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(ring);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Drain pending purge work */
        flush_work(&blkif->persistent_purge_work);

        if (log_stats)
                print_stats(blkif);

        ring->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
        struct xen_blkif *blkif = ring->blkif;
        unsigned long flags;

        /* Free all persistent grant pages */
        spin_lock_irqsave(&blkif->pers_gnts_lock, flags);
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                                     blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;
        spin_unlock_irqrestore(&blkif->pers_gnts_lock, flags);

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);
}

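/*
 * Build the grant-table unmap operations for a request's pages. Persistently
 * mapped grants are only released back to the tree (they stay mapped);
 * everything else that holds a valid handle is queued for unmapping.
 */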
static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;
        unsigned long flags;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        spin_lock_irqsave(&ring->blkif->pers_gnts_lock, flags);
                        put_persistent_gnt(ring->blkif, pages[i]->persistent_gnt);
                        spin_unlock_irqrestore(&ring->blkif->pers_gnts_lock, flags);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif_ring *ring = pending_req->ring;
        struct xen_blkif *blkif = ring->blkif;

        /* BUG_ON used to reproduce existing behaviour,
           but is this the best way to deal with this? */
        BUG_ON(result);

        put_free_pages(blkif, data->pages, data->count);
        make_response(ring, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(ring, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: The fact that we might try to wake up pending_free_wq
         * before drain_complete (in case there's a drain going on)
         * is not a problem with our current implementation, because
         * we can be sure there's no thread waiting on pending_free_wq
         * while a drain is in progress, but it has to be taken into
         * account if the current model is changed.
         */
        if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}


/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(ring->blkif, unmap_pages, invcount);
                }
                pages += batch;
                num -= batch;
        }
}

static int xen_blkbk_map(struct xen_blkif_ring *ring,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;
        struct xen_blkif *blkif = ring->blkif;
        unsigned long irq_flags;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain together with the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts) {
                        spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                pages[i]->gref);
                        spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
                }

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]->page))
                                goto out_of_memory;
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                put_free_pages(blkif, &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants; the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct,
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        spin_lock_irqsave(&blkif->pers_gnts_lock, irq_flags);
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        spin_unlock_irqrestore(&blkif->pers_gnts_lock, irq_flags);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert("%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

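/*
 * Indirect requests carry their segment descriptors in extra granted pages
 * rather than in the ring slot itself. The helper below maps those indirect
 * pages, copies each segment's gref/first_sect/last_sect into the
 * pending_req, validates the sector ranges and accumulates preq->nr_sects.
 */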
958static int xen_blkbk_parse_indirect(struct blkif_request *req,
959 struct pending_req *pending_req,
960 struct seg_buf seg[],
961 struct phys_req *preq)
962{
Roger Pau Monnebb642e82013-05-02 10:21:17 +0200963 struct grant_page **pages = pending_req->indirect_pages;
Bob Liu59795702015-11-14 11:12:15 +0800964 struct xen_blkif_ring *ring = pending_req->ring;
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200965 int indirect_grefs, rc, n, nseg, i;
Roger Pau Monne80bfa2f2014-02-04 11:26:15 +0100966 struct blkif_request_segment *segments = NULL;
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200967
Julien Grall6684fa12015-06-17 15:28:08 +0100968 nseg = pending_req->nr_segs;
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200969 indirect_grefs = INDIRECT_PAGES(nseg);
970 BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
971
Roger Pau Monnebb642e82013-05-02 10:21:17 +0200972 for (i = 0; i < indirect_grefs; i++)
973 pages[i]->gref = req->u.indirect.indirect_grefs[i];
974
Bob Liu59795702015-11-14 11:12:15 +0800975 rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
Roger Pau Monne31552ee2013-04-17 20:19:00 +0200976 if (rc)
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200977 goto unmap;
Roger Pau Monne31552ee2013-04-17 20:19:00 +0200978
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200979 for (n = 0, i = 0; n < nseg; n++) {
980 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
981 /* Map indirect segments */
982 if (segments)
983 kunmap_atomic(segments);
Roger Pau Monnebb642e82013-05-02 10:21:17 +0200984 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200985 }
986 i = n % SEGS_PER_INDIRECT_FRAME;
Roger Pau Monnebb642e82013-05-02 10:21:17 +0200987 pending_req->segments[n]->gref = segments[i].gref;
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200988 seg[n].nsec = segments[i].last_sect -
989 segments[i].first_sect + 1;
990 seg[n].offset = (segments[i].first_sect << 9);
Julien Grall67de5df2015-05-05 16:25:56 +0100991 if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200992 (segments[i].last_sect < segments[i].first_sect)) {
993 rc = -EINVAL;
994 goto unmap;
995 }
996 preq->nr_sects += seg[n].nsec;
997 }
Roger Pau Monne31552ee2013-04-17 20:19:00 +0200998
Roger Pau Monne402b27f2013-04-18 16:06:54 +0200999unmap:
1000 if (segments)
1001 kunmap_atomic(segments);
Bob Liu59795702015-11-14 11:12:15 +08001002 xen_blkbk_unmap(ring, pages, indirect_grefs);
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001003 return rc;
Roger Pau Monne31552ee2013-04-17 20:19:00 +02001004}
1005
Bob Liu59795702015-11-14 11:12:15 +08001006static int dispatch_discard_io(struct xen_blkif_ring *ring,
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001007 struct blkif_request *req)
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001008{
1009 int err = 0;
1010 int status = BLKIF_RSP_OKAY;
Bob Liu59795702015-11-14 11:12:15 +08001011 struct xen_blkif *blkif = ring->blkif;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001012 struct block_device *bdev = blkif->vbd.bdev;
Konrad Rzeszutek Wilk4dae7672012-03-13 18:43:23 -04001013 unsigned long secure;
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001014 struct phys_req preq;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001015
Vegard Nossumea5ec762013-09-05 13:00:14 +02001016 xen_blkif_get(blkif);
1017
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001018 preq.sector_number = req->u.discard.sector_number;
1019 preq.nr_sects = req->u.discard.nr_sectors;
1020
1021 err = xen_vbd_translate(&preq, blkif, WRITE);
1022 if (err) {
Tao Chen77387b82015-04-01 15:04:22 +00001023 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001024 preq.sector_number,
1025 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1026 goto fail_response;
1027 }
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001028 blkif->st_ds_req++;
1029
Konrad Rzeszutek Wilk4dae7672012-03-13 18:43:23 -04001030 secure = (blkif->vbd.discard_secure &&
1031 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1032 BLKDEV_DISCARD_SECURE : 0;
1033
1034 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1035 req->u.discard.nr_sectors,
1036 GFP_KERNEL, secure);
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001037fail_response:
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001038 if (err == -EOPNOTSUPP) {
Tao Chen77387b82015-04-01 15:04:22 +00001039 pr_debug("discard op failed, not supported\n");
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001040 status = BLKIF_RSP_EOPNOTSUPP;
1041 } else if (err)
1042 status = BLKIF_RSP_ERROR;
1043
Bob Liu59795702015-11-14 11:12:15 +08001044 make_response(ring, req->u.discard.id, req->operation, status);
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001045 xen_blkif_put(blkif);
1046 return err;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001047}
1048
Bob Liu59795702015-11-14 11:12:15 +08001049static int dispatch_other_io(struct xen_blkif_ring *ring,
David Vrabel0e367ae2013-03-07 17:32:01 +00001050 struct blkif_request *req,
1051 struct pending_req *pending_req)
1052{
Bob Liu59795702015-11-14 11:12:15 +08001053 free_req(ring, pending_req);
1054 make_response(ring, req->u.other.id, req->operation,
David Vrabel0e367ae2013-03-07 17:32:01 +00001055 BLKIF_RSP_EOPNOTSUPP);
1056 return -EIO;
1057}
1058
Bob Liu59795702015-11-14 11:12:15 +08001059static void xen_blk_drain_io(struct xen_blkif_ring *ring)
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001060{
Bob Liu59795702015-11-14 11:12:15 +08001061 struct xen_blkif *blkif = ring->blkif;
1062
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001063 atomic_set(&blkif->drain, 1);
1064 do {
Bob Liu59795702015-11-14 11:12:15 +08001065 if (atomic_read(&ring->inflight) == 0)
Konrad Rzeszutek Wilk6927d922011-10-17 14:27:48 -04001066 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001067 wait_for_completion_interruptible_timeout(
1068 &blkif->drain_complete, HZ);
1069
1070 if (!atomic_read(&blkif->drain))
1071 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001072 } while (!kthread_should_stop());
1073 atomic_set(&blkif->drain, 0);
1074}
1075
Konrad Rzeszutek Wilkb0aef172011-04-15 10:58:05 -04001076/*
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001077 * Completion callback on the bio's. Called as bh->b_end_io()
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001078 */
1079
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001080static void __end_block_io_op(struct pending_req *pending_req, int error)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001081{
1082 /* An error fails the entire request. */
Konrad Rzeszutek Wilk24f567f2011-05-04 17:07:27 -04001083 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001084 (error == -EOPNOTSUPP)) {
Tao Chen77387b82015-04-01 15:04:22 +00001085 pr_debug("flush diskcache op failed, not supported\n");
Bob Liu59795702015-11-14 11:12:15 +08001086 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001087 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001088 } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
1089 (error == -EOPNOTSUPP)) {
Tao Chen77387b82015-04-01 15:04:22 +00001090 pr_debug("write barrier op failed, not supported\n");
Bob Liu59795702015-11-14 11:12:15 +08001091 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001092 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001093 } else if (error) {
Tao Chen77387b82015-04-01 15:04:22 +00001094 pr_debug("Buffer not up-to-date at end of operation,"
Konrad Rzeszutek Wilkebe81902011-05-12 16:42:31 -04001095 " error=%d\n", error);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001096 pending_req->status = BLKIF_RSP_ERROR;
1097 }
1098
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001099 /*
	1100	 * If all of the bios have completed, it is time to unmap
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001101 * the grant references associated with 'request' and provide
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001102 * the proper response on the ring.
1103 */
Jennifer Herbertc43cf3e2015-01-05 16:49:22 +00001104 if (atomic_dec_and_test(&pending_req->pendcnt))
1105 xen_blkbk_unmap_and_respond(pending_req);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001106}
1107
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001108/*
1109 * bio callback.
1110 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001111static void end_block_io_op(struct bio *bio)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001112{
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001113 __end_block_io_op(bio->bi_private, bio->bi_error);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001114 bio_put(bio);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001115}
1116
1117
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001118
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001119/*
	1120	 * Function to copy the 'struct blkif_request' from the ring buffer
	1121	 * (which has the sectors we want, the number of them, grant references, etc.)
	1122	 * and transmute it to the block API to hand it over to the proper block disk.
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001123 */
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001124static int
Bob Liu59795702015-11-14 11:12:15 +08001125__do_block_io_op(struct xen_blkif_ring *ring)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001126{
Bob Liu59795702015-11-14 11:12:15 +08001127 union blkif_back_rings *blk_rings = &ring->blk_rings;
Jeremy Fitzhardinge88122932009-02-09 12:05:51 -08001128 struct blkif_request req;
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001129 struct pending_req *pending_req;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001130 RING_IDX rc, rp;
1131 int more_to_do = 0;
1132
1133 rc = blk_rings->common.req_cons;
1134 rp = blk_rings->common.sring->req_prod;
1135 rmb(); /* Ensure we see queued requests up to 'rp'. */
1136
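	/*
	 * A buggy or malicious frontend may publish a request producer index
	 * that overflows the ring; refuse to process the ring in that case.
	 */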
Konrad Rzeszutek Wilk8e3f8752013-01-23 16:54:32 -05001137 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1138 rc = blk_rings->common.rsp_prod_pvt;
Tao Chen77387b82015-04-01 15:04:22 +00001139 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
Bob Liu59795702015-11-14 11:12:15 +08001140 rp, rc, rp - rc, ring->blkif->vbd.pdevice);
Konrad Rzeszutek Wilk8e3f8752013-01-23 16:54:32 -05001141 return -EACCES;
1142 }
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001143 while (rc != rp) {
1144
1145 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1146 break;
1147
Keir Fraser8270b452009-03-06 08:29:15 +00001148 if (kthread_should_stop()) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001149 more_to_do = 1;
1150 break;
1151 }
1152
Bob Liu59795702015-11-14 11:12:15 +08001153 pending_req = alloc_req(ring);
Keir Fraser8270b452009-03-06 08:29:15 +00001154 if (NULL == pending_req) {
Bob Liu59795702015-11-14 11:12:15 +08001155 ring->blkif->st_oo_req++;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001156 more_to_do = 1;
1157 break;
1158 }
1159
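		/*
		 * Copy the request into a private struct, converting from the
		 * 32-bit or 64-bit frontend layout where necessary.
		 */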
Bob Liu59795702015-11-14 11:12:15 +08001160 switch (ring->blkif->blk_protocol) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001161 case BLKIF_PROTOCOL_NATIVE:
1162 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1163 break;
1164 case BLKIF_PROTOCOL_X86_32:
1165 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1166 break;
1167 case BLKIF_PROTOCOL_X86_64:
1168 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1169 break;
1170 default:
1171 BUG();
1172 }
1173 blk_rings->common.req_cons = ++rc; /* before make_response() */
1174
1175 /* Apply all sanity checks to /private copy/ of request. */
1176 barrier();
David Vrabel0e367ae2013-03-07 17:32:01 +00001177
1178 switch (req.operation) {
1179 case BLKIF_OP_READ:
1180 case BLKIF_OP_WRITE:
1181 case BLKIF_OP_WRITE_BARRIER:
1182 case BLKIF_OP_FLUSH_DISKCACHE:
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001183 case BLKIF_OP_INDIRECT:
Bob Liu59795702015-11-14 11:12:15 +08001184 if (dispatch_rw_block_io(ring, &req, pending_req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001185 goto done;
1186 break;
1187 case BLKIF_OP_DISCARD:
Bob Liu59795702015-11-14 11:12:15 +08001188 free_req(ring, pending_req);
1189 if (dispatch_discard_io(ring, &req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001190 goto done;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001191 break;
David Vrabel0e367ae2013-03-07 17:32:01 +00001192 default:
Bob Liu59795702015-11-14 11:12:15 +08001193 if (dispatch_other_io(ring, &req, pending_req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001194 goto done;
1195 break;
1196 }
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001197
1198 /* Yield point for this unbounded loop. */
1199 cond_resched();
1200 }
David Vrabel0e367ae2013-03-07 17:32:01 +00001201done:
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001202 return more_to_do;
1203}
1204
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001205static int
Bob Liu59795702015-11-14 11:12:15 +08001206do_block_io_op(struct xen_blkif_ring *ring)
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001207{
Bob Liu59795702015-11-14 11:12:15 +08001208 union blkif_back_rings *blk_rings = &ring->blk_rings;
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001209 int more_to_do;
1210
1211 do {
Bob Liu59795702015-11-14 11:12:15 +08001212 more_to_do = __do_block_io_op(ring);
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001213 if (more_to_do)
1214 break;
1215
1216 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1217 } while (more_to_do);
1218
1219 return more_to_do;
1220}
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001221/*
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001222 * Transmute the 'struct blkif_request' into a proper 'struct bio'
	1223	 * and call 'submit_bio' to pass it to the underlying storage.
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001224 */
Bob Liu59795702015-11-14 11:12:15 +08001225static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
Konrad Rzeszutek Wilk30fd1502011-05-12 16:47:48 -04001226 struct blkif_request *req,
1227 struct pending_req *pending_req)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001228{
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001229 struct phys_req preq;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001230 struct seg_buf *seg = pending_req->seg;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001231 unsigned int nseg;
1232 struct bio *bio = NULL;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001233 struct bio **biolist = pending_req->biolist;
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001234 int i, nbio = 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001235 int operation;
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001236 struct blk_plug plug;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001237 bool drain = false;
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001238 struct grant_page **pages = pending_req->segments;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001239 unsigned short req_operation;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001240
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001241 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1242 req->u.indirect.indirect_op : req->operation;
Julien Grall67de5df2015-05-05 16:25:56 +01001243
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001244 if ((req->operation == BLKIF_OP_INDIRECT) &&
1245 (req_operation != BLKIF_OP_READ) &&
1246 (req_operation != BLKIF_OP_WRITE)) {
Tao Chen77387b82015-04-01 15:04:22 +00001247 pr_debug("Invalid indirect operation (%u)\n", req_operation);
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001248 goto fail_response;
1249 }
1250
1251 switch (req_operation) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001252 case BLKIF_OP_READ:
Bob Liu59795702015-11-14 11:12:15 +08001253 ring->blkif->st_rd_req++;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001254 operation = READ;
1255 break;
1256 case BLKIF_OP_WRITE:
Bob Liu59795702015-11-14 11:12:15 +08001257 ring->blkif->st_wr_req++;
Konrad Rzeszutek Wilk013c3ca2011-04-26 16:24:18 -04001258 operation = WRITE_ODIRECT;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001259 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001260 case BLKIF_OP_WRITE_BARRIER:
1261 drain = true;
Konrad Rzeszutek Wilk24f567f2011-05-04 17:07:27 -04001262 case BLKIF_OP_FLUSH_DISKCACHE:
Bob Liu59795702015-11-14 11:12:15 +08001263 ring->blkif->st_f_req++;
Konrad Rzeszutek Wilk24f567f2011-05-04 17:07:27 -04001264 operation = WRITE_FLUSH;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001265 break;
1266 default:
1267 operation = 0; /* make gcc happy */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001268 goto fail_response;
1269 break;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001270 }
1271
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001272 /* Check that the number of segments is sane. */
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001273 nseg = req->operation == BLKIF_OP_INDIRECT ?
1274 req->u.indirect.nr_segments : req->u.rw.nr_segments;
Konrad Rzeszutek Wilk97e36832011-10-12 12:12:36 -04001275
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001276 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001277 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1278 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1279 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1280 (nseg > MAX_INDIRECT_SEGMENTS))) {
Tao Chen77387b82015-04-01 15:04:22 +00001281 pr_debug("Bad number of segments in request (%d)\n", nseg);
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001282		/* Haven't submitted any bios yet. */
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001283 goto fail_response;
1284 }
1285
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001286 preq.nr_sects = 0;
1287
Bob Liu59795702015-11-14 11:12:15 +08001288 pending_req->ring = ring;
Konrad Rzeszutek Wilk97e36832011-10-12 12:12:36 -04001289 pending_req->id = req->u.rw.id;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001290 pending_req->operation = req_operation;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001291 pending_req->status = BLKIF_RSP_OKAY;
Julien Grall6684fa12015-06-17 15:28:08 +01001292 pending_req->nr_segs = nseg;
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001293
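	/*
	 * Direct requests carry their segments inline; indirect requests
	 * describe them in extra granted pages that are parsed by
	 * xen_blkbk_parse_indirect() into the same seg[] array.
	 */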
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001294 if (req->operation != BLKIF_OP_INDIRECT) {
1295 preq.dev = req->u.rw.handle;
1296 preq.sector_number = req->u.rw.sector_number;
1297 for (i = 0; i < nseg; i++) {
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001298 pages[i]->gref = req->u.rw.seg[i].gref;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001299 seg[i].nsec = req->u.rw.seg[i].last_sect -
1300 req->u.rw.seg[i].first_sect + 1;
1301 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
Julien Grall67de5df2015-05-05 16:25:56 +01001302 if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001303 (req->u.rw.seg[i].last_sect <
1304 req->u.rw.seg[i].first_sect))
1305 goto fail_response;
1306 preq.nr_sects += seg[i].nsec;
1307 }
1308 } else {
1309 preq.dev = req->u.indirect.handle;
1310 preq.sector_number = req->u.indirect.sector_number;
1311 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001312 goto fail_response;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001313 }
1314
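	/*
	 * Map the virtual device and sector range onto the backing block
	 * device; out-of-range requests are refused here.
	 */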
Bob Liu59795702015-11-14 11:12:15 +08001315 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
Tao Chen77387b82015-04-01 15:04:22 +00001316 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
Konrad Rzeszutek Wilkebe81902011-05-12 16:42:31 -04001317 operation == READ ? "read" : "write",
1318 preq.sector_number,
Chen Ganga72d9002013-02-28 10:34:23 +08001319 preq.sector_number + preq.nr_sects,
Bob Liu59795702015-11-14 11:12:15 +08001320 ring->blkif->vbd.pdevice);
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001321 goto fail_response;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001322 }
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001323
1324 /*
Konrad Rzeszutek Wilk3d814732011-05-12 16:53:56 -04001325 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001326 * is set there.
1327 */
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001328 for (i = 0; i < nseg; i++) {
1329 if (((int)preq.sector_number|(int)seg[i].nsec) &
1330 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
Tao Chen77387b82015-04-01 15:04:22 +00001331 pr_debug("Misaligned I/O request from domain %d\n",
Bob Liu59795702015-11-14 11:12:15 +08001332 ring->blkif->domid);
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001333 goto fail_response;
1334 }
1335 }
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001336
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001337	/* Wait on all outstanding I/Os and, once they have completed,
	1338	 * issue the WRITE_FLUSH.
1339 */
1340 if (drain)
Bob Liu59795702015-11-14 11:12:15 +08001341 xen_blk_drain_io(pending_req->ring);
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001342
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001343 /*
	1344	 * If we fail from this point onwards, we need to undo the grant
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001345	 * mappings: set gnttab_set_unmap_op on all of the grant references and perform
1346 * the hypercall to unmap the grants - that is all done in
Konrad Rzeszutek Wilk9f3aedf2011-04-15 11:50:34 -04001347 * xen_blkbk_unmap.
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001348 */
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001349 if (xen_blkbk_map_seg(pending_req))
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001350 goto fail_flush;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001351
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001352 /*
	1353	 * The corresponding xen_blkif_put is done in __end_block_io_op, once
	1354	 * every bio submitted for this request has completed.
1355 */
Bob Liu59795702015-11-14 11:12:15 +08001356 xen_blkif_get(ring->blkif);
1357 atomic_inc(&ring->inflight);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001358
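	/*
	 * Build bios over the mapped grant pages, packing segments into each
	 * bio until bio_add_page() refuses to take more, then start a new one.
	 */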
1359 for (i = 0; i < nseg; i++) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001360 while ((bio == NULL) ||
1361 (bio_add_page(bio,
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001362 pages[i]->page,
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001363 seg[i].nsec << 9,
Roger Pau Monneffb1dab2013-03-18 17:49:32 +01001364 seg[i].offset) == 0)) {
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001365
Roger Pau Monne1e0f7a22013-06-22 09:59:17 +02001366 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1367 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001368 if (unlikely(bio == NULL))
1369 goto fail_put_bio;
1370
Konrad Rzeszutek Wilk03e0edf2011-05-12 16:19:23 -04001371 biolist[nbio++] = bio;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001372 bio->bi_bdev = preq.bdev;
1373 bio->bi_private = pending_req;
1374 bio->bi_end_io = end_block_io_op;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001375 bio->bi_iter.bi_sector = preq.sector_number;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001376 }
1377
1378 preq.sector_number += seg[i].nsec;
1379 }
1380
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001381	/* This will be hit if the operation was a flush and no data segments were attached. */
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001382 if (!bio) {
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001383 BUG_ON(operation != WRITE_FLUSH);
Konrad Rzeszutek Wilkb0f80122011-05-12 16:23:06 -04001384
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001385 bio = bio_alloc(GFP_KERNEL, 0);
1386 if (unlikely(bio == NULL))
1387 goto fail_put_bio;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001388
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001389 biolist[nbio++] = bio;
1390 bio->bi_bdev = preq.bdev;
1391 bio->bi_private = pending_req;
1392 bio->bi_end_io = end_block_io_op;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001393 }
1394
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001395 atomic_set(&pending_req->pendcnt, nbio);
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001396 blk_start_plug(&plug);
1397
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001398 for (i = 0; i < nbio; i++)
1399 submit_bio(operation, biolist[i]);
1400
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001401	/* Let the I/Os go. */
Konrad Rzeszutek Wilk3d68b392011-05-05 13:42:10 -04001402 blk_finish_plug(&plug);
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001403
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001404 if (operation == READ)
Bob Liu59795702015-11-14 11:12:15 +08001405 ring->blkif->st_rd_sect += preq.nr_sects;
Konrad Rzeszutek Wilk5c62cb42011-10-10 12:33:21 -04001406 else if (operation & WRITE)
Bob Liu59795702015-11-14 11:12:15 +08001407 ring->blkif->st_wr_sect += preq.nr_sects;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001408
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001409 return 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001410
1411 fail_flush:
Bob Liu59795702015-11-14 11:12:15 +08001412 xen_blkbk_unmap(ring, pending_req->segments,
Julien Grall6684fa12015-06-17 15:28:08 +01001413 pending_req->nr_segs);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001414 fail_response:
Konrad Rzeszutek Wilk0faa8cc2011-04-14 17:58:19 -04001415	/* Haven't submitted any bios yet. */
Bob Liu59795702015-11-14 11:12:15 +08001416 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1417 free_req(ring, pending_req);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001418 msleep(1); /* back off a bit */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001419 return -EIO;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001420
1421 fail_put_bio:
Konrad Rzeszutek Wilk03e0edf2011-05-12 16:19:23 -04001422 for (i = 0; i < nbio; i++)
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001423 bio_put(biolist[i]);
Jan Beulich0e5e0982013-03-11 09:39:55 +00001424 atomic_set(&pending_req->pendcnt, 1);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001425 __end_block_io_op(pending_req, -EINVAL);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001426 msleep(1); /* back off a bit */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001427 return -EIO;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001428}
1429
1430
1431
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001432/*
1433 * Put a response on the ring on how the operation fared.
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001434 */
Bob Liu59795702015-11-14 11:12:15 +08001435static void make_response(struct xen_blkif_ring *ring, u64 id,
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001436 unsigned short op, int st)
1437{
Jeremy Fitzhardinge88122932009-02-09 12:05:51 -08001438 struct blkif_response resp;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001439 unsigned long flags;
Bob Liu59795702015-11-14 11:12:15 +08001440 union blkif_back_rings *blk_rings;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001441 int notify;
1442
1443 resp.id = id;
1444 resp.operation = op;
1445 resp.status = st;
1446
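	/*
	 * Queue the response under the per-ring lock and kick the frontend
	 * via the event channel only if it asked to be notified.
	 */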
Bob Liu59795702015-11-14 11:12:15 +08001447 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1448 blk_rings = &ring->blk_rings;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001449 /* Place on the response ring for the relevant domain. */
Bob Liu59795702015-11-14 11:12:15 +08001450 switch (ring->blkif->blk_protocol) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001451 case BLKIF_PROTOCOL_NATIVE:
1452 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1453 &resp, sizeof(resp));
1454 break;
1455 case BLKIF_PROTOCOL_X86_32:
1456 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1457 &resp, sizeof(resp));
1458 break;
1459 case BLKIF_PROTOCOL_X86_64:
1460 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1461 &resp, sizeof(resp));
1462 break;
1463 default:
1464 BUG();
1465 }
1466 blk_rings->common.rsp_prod_pvt++;
1467 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
Bob Liu59795702015-11-14 11:12:15 +08001468 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001469 if (notify)
Bob Liu59795702015-11-14 11:12:15 +08001470 notify_remote_via_irq(ring->irq);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001471}
1472
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001473static int __init xen_blkif_init(void)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001474{
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001475 int rc = 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001476
Daniel De Graafb2167ba2011-11-28 11:49:05 -05001477 if (!xen_domain())
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001478 return -ENODEV;
1479
Julien Grall9cce2912015-10-13 17:50:11 +01001480 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
Bob Liu86839c52015-06-03 13:40:03 +08001481 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
Julien Grall9cce2912015-10-13 17:50:11 +01001482 xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1483 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
Bob Liu86839c52015-06-03 13:40:03 +08001484 }
1485
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001486 rc = xen_blkif_interface_init();
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001487 if (rc)
1488 goto failed_init;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001489
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001490 rc = xen_blkif_xenbus_init();
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001491 if (rc)
1492 goto failed_init;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001493
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001494 failed_init:
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001495 return rc;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001496}
1497
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001498module_init(xen_blkif_init);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001499
1500MODULE_LICENSE("Dual BSD/GPL");
Bastian Blanka7e93572011-06-29 14:40:50 +02001501MODULE_ALIAS("xen-backend:vbd");