/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value, which might degrade performance on some
 * I/O-intensive workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum number of rings/queues blkback supports; allow as many queues as
 * there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of hardware queues per virtual disk. " \
                 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
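/*
 * Illustrative arithmetic only: the order is a power of two, so a
 * max_ring_page_order of 2 allows a shared ring of up to 1 << 2 = 4 grant
 * pages, i.e. 16KB with the 4KB page granularity assumed above. The default
 * comes from XENBUS_MAX_RING_GRANT_ORDER in the xenbus headers.
 */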
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
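/*
 * Worked example, assuming the defaults above: with xen_blkif_max_pgrants set
 * to 1056 and LRU_PERCENT_CLEAN set to 5, purge_persistent_gnt() below tries
 * to drop (1056 / 100) * 5 = 50 unused grants per pass, plus however many
 * grants the ring currently holds above the 1056 limit, and the pass is
 * driven from the xen_blkif_schedule() loop on an LRU_INTERVAL (100 ms)
 * cadence.
 */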

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        if (list_empty(&ring->free_pages)) {
                BUG_ON(ring->free_pages_num != 0);
                spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        BUG_ON(ring->free_pages_num == 0);
        page[0] = list_first_entry(&ring->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        ring->free_pages_num--;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &ring->free_pages);
        ring->free_pages_num += num;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        while (ring->free_pages_num > num) {
                BUG_ON(list_empty(&ring->free_pages));
                page[num_pages] = list_first_entry(&ring->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                ring->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                        gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&ring->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
        if (num_pages != 0)
                gnttab_free_pages(num_pages, page);
}
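/*
 * Illustrative arithmetic only: trimming a full pool of 1024 free pages (the
 * default xen_blkif_max_buffer_pages above) down to 0 takes
 * DIV_ROUND_UP(1024, NUM_BATCH_FREE_PAGES) = 103 calls to gnttab_free_pages(),
 * and free_pages_lock is dropped around each batch so concurrent
 * get_free_page()/put_free_pages() callers are not held off for the whole
 * shrink.
 */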

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
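/*
 * Like list_for_each_entry_safe(), but for an rbtree: the next node is saved
 * in 'n' before the loop body runs, so the body may rb_erase() and kfree()
 * 'pos' without breaking the iteration. A minimal usage sketch, mirroring
 * free_persistent_gnts() and purge_persistent_gnt() below:
 *
 *      struct persistent_gnt *gnt;
 *      struct rb_node *n;
 *
 *      foreach_grant_safe(gnt, n, root, node) {
 *              rb_erase(&gnt->node, root);
 *              kfree(gnt);
 *      }
 */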


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;
        struct xen_blkif *blkif = ring->blkif;

        if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &ring->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
        ring->persistent_gnt_c++;
        atomic_inc(&ring->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = ring->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&ring->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited("freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&ring->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        while (!list_empty(&ring->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&ring->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                put_free_pages(ring, pages, segs_to_unmap);
        }
}

static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;

        if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
            (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
            !ring->blkif->vbd.overflow_max_grants)) {
                goto out;
        }

        if (work_busy(&ring->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(ring->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
                goto out;

        /*
         * At this point, we can assure that there will be no calls
         * to get_persistent_grant (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug("Going to purge %u persistent grants\n", num_clean);

        BUG_ON(!list_empty(&ring->persistent_purge_list));
        root = &ring->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &ring->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since last purge in order to cope
         * with the requested num
         */
        if (!scan_used && !clean_used) {
                pr_debug("Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug("Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        ring->persistent_gnt_c -= (total - num_clean);
        ring->blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        schedule_work(&ring->persistent_purge_work);
        pr_debug("Purged %u/%u\n", (total - num_clean), total);

out:
        return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        if (!list_empty(&ring->pending_free)) {
                req = list_entry(ring->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        was_empty = list_empty(&ring->pending_free);
        list_add(&req->free_list, &ring->pending_free);
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        if (was_empty)
                wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != REQ_OP_READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
        ring->waiting_reqs = 1;
        wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
        pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                 " | ds %4llu | pg: %4u/%4d\n",
                 current->comm, ring->st_oo_req,
                 ring->st_rd_req, ring->st_wr_req,
                 ring->st_f_req, ring->st_ds_req,
                 ring->persistent_gnt_c,
                 xen_blkif_max_pgrants);
        ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        ring->st_rd_req = 0;
        ring->st_wr_req = 0;
        ring->st_oo_req = 0;
        ring->st_ds_req = 0;
}

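/*
 * Main loop of the per-ring kthread (one instance per xen_blkif_ring): wait
 * until the frontend kicks us or the LRU interval elapses, resize the vbd if
 * the backing device changed size, process ring requests via
 * do_block_io_op(), then do the periodic housekeeping described above
 * (purge_persistent_gnt(), shrink_free_pagepool() and, when log_stats is set,
 * print_stats()).
 */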
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif_ring *ring = arg;
        struct xen_blkif *blkif = ring->blkif;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;

        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        ring->wq,
                        ring->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        ring->pending_free_wq,
                        !list_empty(&ring->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(ring);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, ring->next_lru)) {
                        purge_persistent_gnt(ring);
                        ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, ring->st_print))
                        print_stats(ring);
        }

        /* Drain pending purge work */
        flush_work(&ring->persistent_purge_work);

        if (log_stats)
                print_stats(ring);

        ring->xenblkd = NULL;

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
                free_persistent_gnts(ring, &ring->persistent_gnts,
                        ring->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
        ring->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(ring, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(ring, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif_ring *ring = pending_req->ring;
        struct xen_blkif *blkif = ring->blkif;

        /*
         * BUG_ON used to reproduce existing behaviour, but is this the best
         * way to deal with this?
         */
        BUG_ON(result);

        put_free_pages(ring, data->pages, data->count);
        make_response(ring, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(ring, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: we might try to wake up pending_free_wq before
         * drain_complete (in case there's a drain going on); that is
         * not a problem with our current implementation because we
         * can assure there's no thread waiting on pending_free_wq if
         * there's a drain going on, but it has to be taken into
         * account if the current model is changed.
         */
        if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}


/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(ring, unmap_pages, invcount);
                }
                pages += batch;
                num -= batch;
        }
}

static int xen_blkbk_map(struct xen_blkif_ring *ring,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;
        struct xen_blkif *blkif = ring->blkif;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set up
         * map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts) {
                        persistent_gnt = get_persistent_gnt(
                                ring,
                                pages[i]->gref);
                }

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(ring, &pages[i]->page))
                                goto out_of_memory;
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                put_free_pages(ring, &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(ring,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert("%s: out of memory\n", __func__);
        put_free_pages(ring, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif_ring *ring = pending_req->ring;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;

                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;

                pending_req->segments[n]->gref = segments[i].gref;

                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                        rc = -EINVAL;
                        goto unmap;
                }

                seg[n].nsec = last_sect - first_sect + 1;
                seg[n].offset = first_sect << 9;
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(ring, pages, indirect_grefs);
        return rc;
}
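/*
 * Illustrative arithmetic only, assuming 4KB Xen pages and the 8-byte
 * blkif_request_segment layout, so SEGS_PER_INDIRECT_FRAME would be 512: an
 * indirect request carrying 1024 segments needs INDIRECT_PAGES(1024) = 2
 * indirect grant frames, each mapped read-only above and walked 512 entries
 * at a time through kmap_atomic().
 */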
998
Bob Liu59795702015-11-14 11:12:15 +0800999static int dispatch_discard_io(struct xen_blkif_ring *ring,
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001000 struct blkif_request *req)
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001001{
1002 int err = 0;
1003 int status = BLKIF_RSP_OKAY;
Bob Liu59795702015-11-14 11:12:15 +08001004 struct xen_blkif *blkif = ring->blkif;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001005 struct block_device *bdev = blkif->vbd.bdev;
Konrad Rzeszutek Wilk4dae7672012-03-13 18:43:23 -04001006 unsigned long secure;
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001007 struct phys_req preq;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001008
Vegard Nossumea5ec762013-09-05 13:00:14 +02001009 xen_blkif_get(blkif);
1010
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001011 preq.sector_number = req->u.discard.sector_number;
1012 preq.nr_sects = req->u.discard.nr_sectors;
1013
Mike Christiea0226062016-06-05 14:32:09 -05001014 err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001015 if (err) {
Tao Chen77387b82015-04-01 15:04:22 +00001016 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001017 preq.sector_number,
1018 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1019 goto fail_response;
1020 }
Bob Liudb6fbc12015-12-09 07:44:02 +08001021 ring->st_ds_req++;
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001022
Konrad Rzeszutek Wilk4dae7672012-03-13 18:43:23 -04001023 secure = (blkif->vbd.discard_secure &&
1024 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1025 BLKDEV_DISCARD_SECURE : 0;
1026
1027 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1028 req->u.discard.nr_sectors,
1029 GFP_KERNEL, secure);
Konrad Rzeszutek Wilk604c4992013-01-16 11:33:52 -05001030fail_response:
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001031 if (err == -EOPNOTSUPP) {
Tao Chen77387b82015-04-01 15:04:22 +00001032 pr_debug("discard op failed, not supported\n");
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001033 status = BLKIF_RSP_EOPNOTSUPP;
1034 } else if (err)
1035 status = BLKIF_RSP_ERROR;
1036
Bob Liu59795702015-11-14 11:12:15 +08001037 make_response(ring, req->u.discard.id, req->operation, status);
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001038 xen_blkif_put(blkif);
1039 return err;
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001040}
1041
Bob Liu59795702015-11-14 11:12:15 +08001042static int dispatch_other_io(struct xen_blkif_ring *ring,
David Vrabel0e367ae2013-03-07 17:32:01 +00001043 struct blkif_request *req,
1044 struct pending_req *pending_req)
1045{
Bob Liu59795702015-11-14 11:12:15 +08001046 free_req(ring, pending_req);
1047 make_response(ring, req->u.other.id, req->operation,
David Vrabel0e367ae2013-03-07 17:32:01 +00001048 BLKIF_RSP_EOPNOTSUPP);
1049 return -EIO;
1050}
1051
Bob Liu59795702015-11-14 11:12:15 +08001052static void xen_blk_drain_io(struct xen_blkif_ring *ring)
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001053{
Bob Liu59795702015-11-14 11:12:15 +08001054 struct xen_blkif *blkif = ring->blkif;
1055
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001056 atomic_set(&blkif->drain, 1);
1057 do {
Bob Liu59795702015-11-14 11:12:15 +08001058 if (atomic_read(&ring->inflight) == 0)
Konrad Rzeszutek Wilk6927d922011-10-17 14:27:48 -04001059 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001060 wait_for_completion_interruptible_timeout(
1061 &blkif->drain_complete, HZ);
1062
1063 if (!atomic_read(&blkif->drain))
1064 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001065 } while (!kthread_should_stop());
1066 atomic_set(&blkif->drain, 0);
1067}
1068
Konrad Rzeszutek Wilkb0aef172011-04-15 10:58:05 -04001069/*
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001070 * Completion callback on the bios. Called as bio->bi_end_io()
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001071 */
1072
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001073static void __end_block_io_op(struct pending_req *pending_req, int error)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001074{
1075 /* An error fails the entire request. */
Konrad Rzeszutek Wilk24f567f2011-05-04 17:07:27 -04001076 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001077 (error == -EOPNOTSUPP)) {
Tao Chen77387b82015-04-01 15:04:22 +00001078 pr_debug("flush diskcache op failed, not supported\n");
Bob Liu59795702015-11-14 11:12:15 +08001079 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001080 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001081 } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
1082 (error == -EOPNOTSUPP)) {
Tao Chen77387b82015-04-01 15:04:22 +00001083 pr_debug("write barrier op failed, not supported\n");
Bob Liu59795702015-11-14 11:12:15 +08001084 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001085 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001086 } else if (error) {
Tao Chen77387b82015-04-01 15:04:22 +00001087		pr_debug("Buffer not up-to-date at end of operation, error=%d\n", error);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001089 pending_req->status = BLKIF_RSP_ERROR;
1090 }
1091
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001092 /*
1093	 * If all of the bios have completed, it is time to unmap
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001094 * the grant references associated with 'request' and provide
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001095 * the proper response on the ring.
1096 */
Jennifer Herbertc43cf3e2015-01-05 16:49:22 +00001097 if (atomic_dec_and_test(&pending_req->pendcnt))
1098 xen_blkbk_unmap_and_respond(pending_req);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001099}
1100
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001101/*
1102 * bio callback.
1103 */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001104static void end_block_io_op(struct bio *bio)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001105{
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001106 __end_block_io_op(bio->bi_private, bio->bi_error);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001107 bio_put(bio);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001108}
1109
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001112/*
1113 * Function to copy the 'struct blkif_request' from the ring buffer
1114 * (which has the sectors we want, number of them, grant references, etc),
1115 * and transmute it to the block API to hand it over to the proper block disk.
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001116 */
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001117static int
Bob Liu59795702015-11-14 11:12:15 +08001118__do_block_io_op(struct xen_blkif_ring *ring)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001119{
Bob Liu59795702015-11-14 11:12:15 +08001120 union blkif_back_rings *blk_rings = &ring->blk_rings;
Jeremy Fitzhardinge88122932009-02-09 12:05:51 -08001121 struct blkif_request req;
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001122 struct pending_req *pending_req;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001123 RING_IDX rc, rp;
1124 int more_to_do = 0;
1125
1126 rc = blk_rings->common.req_cons;
1127 rp = blk_rings->common.sring->req_prod;
1128 rmb(); /* Ensure we see queued requests up to 'rp'. */
1129
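	/*
	 * A buggy or malicious frontend can publish a req_prod far beyond
	 * what the ring can actually hold; refuse to process such a ring
	 * rather than consuming bogus request slots.
	 */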
Konrad Rzeszutek Wilk8e3f8752013-01-23 16:54:32 -05001130 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1131 rc = blk_rings->common.rsp_prod_pvt;
Tao Chen77387b82015-04-01 15:04:22 +00001132 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
Bob Liu59795702015-11-14 11:12:15 +08001133 rp, rc, rp - rc, ring->blkif->vbd.pdevice);
Konrad Rzeszutek Wilk8e3f8752013-01-23 16:54:32 -05001134 return -EACCES;
1135 }
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001136 while (rc != rp) {
1137
1138 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1139 break;
1140
Keir Fraser8270b452009-03-06 08:29:15 +00001141 if (kthread_should_stop()) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001142 more_to_do = 1;
1143 break;
1144 }
1145
Bob Liu59795702015-11-14 11:12:15 +08001146 pending_req = alloc_req(ring);
Keir Fraser8270b452009-03-06 08:29:15 +00001147		if (pending_req == NULL) {
Bob Liudb6fbc12015-12-09 07:44:02 +08001148 ring->st_oo_req++;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001149 more_to_do = 1;
1150 break;
1151 }
1152
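		/*
		 * Copy the request into a private structure in the backend's
		 * native layout: the x86_32 and x86_64 protocol variants pad
		 * struct blkif_request differently, so convert as needed.
		 */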
Bob Liu59795702015-11-14 11:12:15 +08001153 switch (ring->blkif->blk_protocol) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001154 case BLKIF_PROTOCOL_NATIVE:
1155 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1156 break;
1157 case BLKIF_PROTOCOL_X86_32:
1158 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1159 break;
1160 case BLKIF_PROTOCOL_X86_64:
1161 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1162 break;
1163 default:
1164 BUG();
1165 }
1166 blk_rings->common.req_cons = ++rc; /* before make_response() */
1167
1168 /* Apply all sanity checks to /private copy/ of request. */
1169 barrier();
David Vrabel0e367ae2013-03-07 17:32:01 +00001170
1171 switch (req.operation) {
1172 case BLKIF_OP_READ:
1173 case BLKIF_OP_WRITE:
1174 case BLKIF_OP_WRITE_BARRIER:
1175 case BLKIF_OP_FLUSH_DISKCACHE:
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001176 case BLKIF_OP_INDIRECT:
Bob Liu59795702015-11-14 11:12:15 +08001177 if (dispatch_rw_block_io(ring, &req, pending_req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001178 goto done;
1179 break;
1180 case BLKIF_OP_DISCARD:
Bob Liu59795702015-11-14 11:12:15 +08001181 free_req(ring, pending_req);
1182 if (dispatch_discard_io(ring, &req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001183 goto done;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001184 break;
David Vrabel0e367ae2013-03-07 17:32:01 +00001185 default:
Bob Liu59795702015-11-14 11:12:15 +08001186 if (dispatch_other_io(ring, &req, pending_req))
David Vrabel0e367ae2013-03-07 17:32:01 +00001187 goto done;
1188 break;
1189 }
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001190
1191 /* Yield point for this unbounded loop. */
1192 cond_resched();
1193 }
David Vrabel0e367ae2013-03-07 17:32:01 +00001194done:
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001195 return more_to_do;
1196}
1197
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001198static int
Bob Liu59795702015-11-14 11:12:15 +08001199do_block_io_op(struct xen_blkif_ring *ring)
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001200{
Bob Liu59795702015-11-14 11:12:15 +08001201 union blkif_back_rings *blk_rings = &ring->blk_rings;
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001202 int more_to_do;
1203
1204 do {
Bob Liu59795702015-11-14 11:12:15 +08001205 more_to_do = __do_block_io_op(ring);
Daniel Stoddenb4726a92011-05-28 13:21:10 -07001206 if (more_to_do)
1207 break;
1208
1209 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1210 } while (more_to_do);
1211
1212 return more_to_do;
1213}
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001214/*
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001215 * Transmute the 'struct blkif_request' into a proper 'struct bio'
1216 * and call 'submit_bio' to pass it to the underlying storage.
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001217 */
Bob Liu59795702015-11-14 11:12:15 +08001218static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
Konrad Rzeszutek Wilk30fd1502011-05-12 16:47:48 -04001219 struct blkif_request *req,
1220 struct pending_req *pending_req)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001221{
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001222 struct phys_req preq;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001223 struct seg_buf *seg = pending_req->seg;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001224 unsigned int nseg;
1225 struct bio *bio = NULL;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001226 struct bio **biolist = pending_req->biolist;
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001227 int i, nbio = 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001228 int operation;
Mike Christiea0226062016-06-05 14:32:09 -05001229 int operation_flags = 0;
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001230 struct blk_plug plug;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001231 bool drain = false;
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001232 struct grant_page **pages = pending_req->segments;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001233 unsigned short req_operation;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001234
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001235 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1236 req->u.indirect.indirect_op : req->operation;
Julien Grall67de5df2015-05-05 16:25:56 +01001237
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001238 if ((req->operation == BLKIF_OP_INDIRECT) &&
1239 (req_operation != BLKIF_OP_READ) &&
1240 (req_operation != BLKIF_OP_WRITE)) {
Tao Chen77387b82015-04-01 15:04:22 +00001241 pr_debug("Invalid indirect operation (%u)\n", req_operation);
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001242 goto fail_response;
1243 }
1244
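	/*
	 * Map the blkif operation onto a block layer op and flags.  A write
	 * barrier is handled as a flush that first drains all in-flight I/O
	 * on this ring (see the 'drain' handling below).
	 */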
1245 switch (req_operation) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001246 case BLKIF_OP_READ:
Bob Liudb6fbc12015-12-09 07:44:02 +08001247 ring->st_rd_req++;
Mike Christiea0226062016-06-05 14:32:09 -05001248 operation = REQ_OP_READ;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001249 break;
1250 case BLKIF_OP_WRITE:
Bob Liudb6fbc12015-12-09 07:44:02 +08001251 ring->st_wr_req++;
Mike Christiea0226062016-06-05 14:32:09 -05001252 operation = REQ_OP_WRITE;
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001253 operation_flags = REQ_SYNC | REQ_IDLE;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001254 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001255 case BLKIF_OP_WRITE_BARRIER:
1256 drain = true;
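		/* fall through */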
Konrad Rzeszutek Wilk24f567f2011-05-04 17:07:27 -04001257 case BLKIF_OP_FLUSH_DISKCACHE:
Bob Liudb6fbc12015-12-09 07:44:02 +08001258 ring->st_f_req++;
Mike Christiea0226062016-06-05 14:32:09 -05001259 operation = REQ_OP_WRITE;
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001260 operation_flags = REQ_PREFLUSH;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001261 break;
1262 default:
1263 operation = 0; /* make gcc happy */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001264 goto fail_response;
1265 break;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001266 }
1267
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001268 /* Check that the number of segments is sane. */
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001269 nseg = req->operation == BLKIF_OP_INDIRECT ?
1270 req->u.indirect.nr_segments : req->u.rw.nr_segments;
Konrad Rzeszutek Wilk97e36832011-10-12 12:12:36 -04001271
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001272 if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001273 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1274 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1275 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1276 (nseg > MAX_INDIRECT_SEGMENTS))) {
Tao Chen77387b82015-04-01 15:04:22 +00001277 pr_debug("Bad number of segments in request (%d)\n", nseg);
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001278 /* Haven't submitted any bio's yet. */
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001279 goto fail_response;
1280 }
1281
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001282 preq.nr_sects = 0;
1283
Bob Liu59795702015-11-14 11:12:15 +08001284 pending_req->ring = ring;
Konrad Rzeszutek Wilk97e36832011-10-12 12:12:36 -04001285 pending_req->id = req->u.rw.id;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001286 pending_req->operation = req_operation;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001287 pending_req->status = BLKIF_RSP_OKAY;
Julien Grall6684fa12015-06-17 15:28:08 +01001288 pending_req->nr_segs = nseg;
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001289
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001290 if (req->operation != BLKIF_OP_INDIRECT) {
1291 preq.dev = req->u.rw.handle;
1292 preq.sector_number = req->u.rw.sector_number;
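		/*
		 * Each segment references one granted page; first_sect and
		 * last_sect are 512-byte sector offsets within that page, so
		 * last_sect must lie inside the page and not precede
		 * first_sect.
		 */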
1293 for (i = 0; i < nseg; i++) {
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001294 pages[i]->gref = req->u.rw.seg[i].gref;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001295 seg[i].nsec = req->u.rw.seg[i].last_sect -
1296 req->u.rw.seg[i].first_sect + 1;
1297 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
Julien Grall67de5df2015-05-05 16:25:56 +01001298 if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001299 (req->u.rw.seg[i].last_sect <
1300 req->u.rw.seg[i].first_sect))
1301 goto fail_response;
1302 preq.nr_sects += seg[i].nsec;
1303 }
1304 } else {
1305 preq.dev = req->u.indirect.handle;
1306 preq.sector_number = req->u.indirect.sector_number;
1307 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001308 goto fail_response;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001309 }
1310
Bob Liu59795702015-11-14 11:12:15 +08001311 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
Tao Chen77387b82015-04-01 15:04:22 +00001312 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
Mike Christiea0226062016-06-05 14:32:09 -05001313 operation == REQ_OP_READ ? "read" : "write",
Konrad Rzeszutek Wilkebe81902011-05-12 16:42:31 -04001314 preq.sector_number,
Chen Ganga72d9002013-02-28 10:34:23 +08001315 preq.sector_number + preq.nr_sects,
Bob Liu59795702015-11-14 11:12:15 +08001316 ring->blkif->vbd.pdevice);
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001317 goto fail_response;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001318 }
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001319
1320 /*
Konrad Rzeszutek Wilk3d814732011-05-12 16:53:56 -04001321 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001322 * is set there.
1323 */
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001324 for (i = 0; i < nseg; i++) {
1325 if (((int)preq.sector_number|(int)seg[i].nsec) &
1326 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
Tao Chen77387b82015-04-01 15:04:22 +00001327 pr_debug("Misaligned I/O request from domain %d\n",
Bob Liu59795702015-11-14 11:12:15 +08001328 ring->blkif->domid);
Konrad Rzeszutek Wilke9350492011-04-18 11:34:55 -04001329 goto fail_response;
1330 }
1331 }
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001332
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001333	/* Wait on all outstanding I/Os and, once they have completed,
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001334	 * issue the flush.
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001335 */
1336 if (drain)
Bob Liu59795702015-11-14 11:12:15 +08001337 xen_blk_drain_io(pending_req->ring);
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001338
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001339 /*
1340 * If we have failed at this point, we need to undo the M2P override,
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001341 * set gnttab_set_unmap_op on all of the grant references and perform
1342 * the hypercall to unmap the grants - that is all done in
Konrad Rzeszutek Wilk9f3aedf2011-04-15 11:50:34 -04001343 * xen_blkbk_unmap.
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001344 */
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001345 if (xen_blkbk_map_seg(pending_req))
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001346 goto fail_flush;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001347
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001348 /*
1349	 * The corresponding xen_blkif_put is done in __end_block_io_op once
1350	 * all bios submitted for this request have completed.
1351 */
Bob Liu59795702015-11-14 11:12:15 +08001352 xen_blkif_get(ring->blkif);
1353 atomic_inc(&ring->inflight);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001354
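	/*
	 * Pack as many segments as possible into each bio: keep calling
	 * bio_add_page() on the current bio and only allocate a new one
	 * (sized for the remaining segments) when it refuses to grow.
	 */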
1355 for (i = 0; i < nseg; i++) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001356 while ((bio == NULL) ||
1357 (bio_add_page(bio,
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001358 pages[i]->page,
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001359 seg[i].nsec << 9,
Roger Pau Monneffb1dab2013-03-18 17:49:32 +01001360 seg[i].offset) == 0)) {
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -04001361
Roger Pau Monne1e0f7a22013-06-22 09:59:17 +02001362 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1363 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001364 if (unlikely(bio == NULL))
1365 goto fail_put_bio;
1366
Konrad Rzeszutek Wilk03e0edf2011-05-12 16:19:23 -04001367 biolist[nbio++] = bio;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001368 bio->bi_bdev = preq.bdev;
1369 bio->bi_private = pending_req;
1370 bio->bi_end_io = end_block_io_op;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001371 bio->bi_iter.bi_sector = preq.sector_number;
Mike Christiea0226062016-06-05 14:32:09 -05001372 bio_set_op_attrs(bio, operation, operation_flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001373 }
1374
1375 preq.sector_number += seg[i].nsec;
1376 }
1377
Li Dongyangb3cb0d62011-09-01 18:39:10 +08001378	/* This will be hit if the operation was a flush. */
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001379 if (!bio) {
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001380 BUG_ON(operation_flags != REQ_PREFLUSH);
Konrad Rzeszutek Wilkb0f80122011-05-12 16:23:06 -04001381
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001382 bio = bio_alloc(GFP_KERNEL, 0);
1383 if (unlikely(bio == NULL))
1384 goto fail_put_bio;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001385
Konrad Rzeszutek Wilk42146352011-10-12 17:26:47 -04001386 biolist[nbio++] = bio;
1387 bio->bi_bdev = preq.bdev;
1388 bio->bi_private = pending_req;
1389 bio->bi_end_io = end_block_io_op;
Mike Christiea0226062016-06-05 14:32:09 -05001390 bio_set_op_attrs(bio, operation, operation_flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001391 }
1392
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001393 atomic_set(&pending_req->pendcnt, nbio);
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001394 blk_start_plug(&plug);
1395
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001396 for (i = 0; i < nbio; i++)
Mike Christie4e49ea42016-06-05 14:31:41 -05001397 submit_bio(biolist[i]);
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001398
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001399 /* Let the I/Os go.. */
Konrad Rzeszutek Wilk3d68b392011-05-05 13:42:10 -04001400 blk_finish_plug(&plug);
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001401
Mike Christiea0226062016-06-05 14:32:09 -05001402 if (operation == REQ_OP_READ)
Bob Liudb6fbc12015-12-09 07:44:02 +08001403 ring->st_rd_sect += preq.nr_sects;
Mike Christiea0226062016-06-05 14:32:09 -05001404 else if (operation == REQ_OP_WRITE)
Bob Liudb6fbc12015-12-09 07:44:02 +08001405 ring->st_wr_sect += preq.nr_sects;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001406
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001407 return 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001408
1409 fail_flush:
Bob Liu59795702015-11-14 11:12:15 +08001410 xen_blkbk_unmap(ring, pending_req->segments,
Julien Grall6684fa12015-06-17 15:28:08 +01001411 pending_req->nr_segs);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001412 fail_response:
Konrad Rzeszutek Wilk0faa8cc2011-04-14 17:58:19 -04001413 /* Haven't submitted any bio's yet. */
Bob Liu59795702015-11-14 11:12:15 +08001414 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1415 free_req(ring, pending_req);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001416 msleep(1); /* back off a bit */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001417 return -EIO;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001418
1419 fail_put_bio:
Konrad Rzeszutek Wilk03e0edf2011-05-12 16:19:23 -04001420 for (i = 0; i < nbio; i++)
Konrad Rzeszutek Wilk77089922011-04-15 10:51:27 -04001421 bio_put(biolist[i]);
Jan Beulich0e5e0982013-03-11 09:39:55 +00001422 atomic_set(&pending_req->pendcnt, 1);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001423 __end_block_io_op(pending_req, -EINVAL);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001424 msleep(1); /* back off a bit */
Konrad Rzeszutek Wilkfc53bf72011-05-05 13:37:23 -04001425 return -EIO;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001426}
1427
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001430/*
1431 * Put a response on the ring indicating how the operation fared.
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001432 */
Bob Liu59795702015-11-14 11:12:15 +08001433static void make_response(struct xen_blkif_ring *ring, u64 id,
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001434 unsigned short op, int st)
1435{
Jan Beulich089bc012017-06-13 16:28:27 -04001436 struct blkif_response *resp;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001437 unsigned long flags;
Bob Liu59795702015-11-14 11:12:15 +08001438 union blkif_back_rings *blk_rings;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001439 int notify;
1440
Bob Liu59795702015-11-14 11:12:15 +08001441 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1442 blk_rings = &ring->blk_rings;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001443 /* Place on the response ring for the relevant domain. */
Bob Liu59795702015-11-14 11:12:15 +08001444 switch (ring->blkif->blk_protocol) {
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001445 case BLKIF_PROTOCOL_NATIVE:
Jan Beulich089bc012017-06-13 16:28:27 -04001446 resp = RING_GET_RESPONSE(&blk_rings->native,
1447 blk_rings->native.rsp_prod_pvt);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001448 break;
1449 case BLKIF_PROTOCOL_X86_32:
Jan Beulich089bc012017-06-13 16:28:27 -04001450 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1451 blk_rings->x86_32.rsp_prod_pvt);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001452 break;
1453 case BLKIF_PROTOCOL_X86_64:
Jan Beulich089bc012017-06-13 16:28:27 -04001454 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1455 blk_rings->x86_64.rsp_prod_pvt);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001456 break;
1457 default:
1458 BUG();
1459 }
Jan Beulich089bc012017-06-13 16:28:27 -04001460
1461 resp->id = id;
1462 resp->operation = op;
1463 resp->status = st;
1464
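	/*
	 * Push the response and notify the frontend only if it is actually
	 * waiting for one (RING_PUSH_RESPONSES_AND_CHECK_NOTIFY leaves
	 * 'notify' clear otherwise).
	 */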
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001465 blk_rings->common.rsp_prod_pvt++;
1466 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
Bob Liu59795702015-11-14 11:12:15 +08001467 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001468 if (notify)
Bob Liu59795702015-11-14 11:12:15 +08001469 notify_remote_via_irq(ring->irq);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001470}
1471
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001472static int __init xen_blkif_init(void)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001473{
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001474 int rc = 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001475
Daniel De Graafb2167ba2011-11-28 11:49:05 -05001476 if (!xen_domain())
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001477 return -ENODEV;
1478
Julien Grall9cce2912015-10-13 17:50:11 +01001479 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
Bob Liu86839c52015-06-03 13:40:03 +08001480 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
Julien Grall9cce2912015-10-13 17:50:11 +01001481 xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1482 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
Bob Liu86839c52015-06-03 13:40:03 +08001483 }
1484
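	/* Default to one ring (queue) per online CPU when unset. */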
Bob Liud62d8602015-11-14 11:12:17 +08001485 if (xenblk_max_queues == 0)
1486 xenblk_max_queues = num_online_cpus();
1487
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001488 rc = xen_blkif_interface_init();
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001489 if (rc)
1490 goto failed_init;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001491
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001492 rc = xen_blkif_xenbus_init();
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001493 if (rc)
1494 goto failed_init;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001495
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001496 failed_init:
Konrad Rzeszutek Wilk8770b262009-10-08 13:23:09 -04001497 return rc;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001498}
1499
Konrad Rzeszutek Wilk8b6bf742011-04-20 11:50:43 -04001500module_init(xen_blkif_init);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001501
1502MODULE_LICENSE("Dual BSD/GPL");
Bastian Blanka7e93572011-06-29 14:40:50 +02001503MODULE_ALIAS("xen-backend:vbd");