/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set to a lower value, which may degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism that cleans the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive
 * executions of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
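
/*
 * Worked example (illustrative): with the defaults above, each pass of
 * purge_persistent_gnt() first computes
 *
 *   num_clean = (1056 / 100) * 5 = 50
 *
 * and then adds however many grants the backend currently holds above the
 * 1056 limit, so a backend sitting exactly at the limit tries to reclaim
 * 50 grants per pass.
 */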

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return alloc_xenballooned_pages(1, page, false);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        free_xenballooned_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                free_xenballooned_pages(num_pages, page);
}
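
/*
 * Illustrative sketch (not part of the driver): the expected pairing of
 * the pool helpers above. Pages are taken with get_free_page() and
 * returned with put_free_pages(); xen_blkif_schedule() later trims the
 * pool back to xen_blkif_max_buffer_pages via shrink_free_pagepool().
 */
#if 0
static int example_use_pool(struct xen_blkif *blkif)
{
        struct page *page[2];

        if (get_free_page(blkif, &page[0]))
                return -ENOMEM;
        if (get_free_page(blkif, &page[1])) {
                put_free_pages(blkif, page, 1);
                return -ENOMEM;
        }
        /* ... use the pages as targets for grant mappings ... */
        put_free_pages(blkif, page, 2);
        return 0;
}
#endif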

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
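
/*
 * Illustrative sketch (not part of the driver): foreach_grant_safe()
 * fetches the next node before the loop body runs, so the body may erase
 * and free the current node, as free_persistent_gnts() does below:
 */
#if 0
static void example_drop_all(struct rb_root *root)
{
        struct persistent_gnt *gnt;
        struct rb_node *n;

        foreach_grant_safe(gnt, n, root, node) {
                rb_erase(&gnt->node, root); /* safe: 'n' already points past us */
                kfree(gnt);
        }
}
#endif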


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}
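
/*
 * Illustrative sketch (not part of the driver): the intended lifecycle of
 * the helpers above. A lookup marks the grant active; the caller must
 * balance it with put_persistent_gnt(). A miss is followed by a fresh
 * mapping plus add_persistent_gnt(), as xen_blkbk_map() does below.
 */
#if 0
static void example_lookup(struct xen_blkif *blkif, grant_ref_t gref)
{
        struct persistent_gnt *gnt = get_persistent_gnt(blkif, gref);

        if (gnt) {
                /* ... read or write through gnt->page ... */
                put_persistent_gnt(blkif, gnt);
        }
}
#endif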

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

static void unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int ret, segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}

static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_pending(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
                return;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
                return;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_grant (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

        INIT_LIST_HEAD(&blkif->persistent_purge_list);
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used && !clean_used) {
                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
        schedule_work(&blkif->persistent_purge_work);
        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
        return;
}
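
/*
 * Note on the passes above (descriptive, added for clarity): the tree is
 * walked up to three times. The first pass skips grants that are in use
 * (ACTIVE) or were used since the last purge (WAS_ACTIVE); the second
 * pass (scan_used) also takes recently used grants if the first pass
 * fell short; the final pass (clean_used) only clears WAS_ACTIVE so the
 * next purge starts from a clean slate.
 */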
426
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -0400427/*
428 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400429 */
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200430static struct pending_req *alloc_req(struct xen_blkif *blkif)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400431{
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -0400432 struct pending_req *req = NULL;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400433 unsigned long flags;
434
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200435 spin_lock_irqsave(&blkif->pending_free_lock, flags);
436 if (!list_empty(&blkif->pending_free)) {
437 req = list_entry(blkif->pending_free.next, struct pending_req,
Konrad Rzeszutek Wilk2e9977c2011-04-14 17:42:07 -0400438 free_list);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400439 list_del(&req->free_list);
440 }
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200441 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400442 return req;
443}
444
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -0400445/*
446 * Return the 'pending_req' structure back to the freepool. We also
447 * wake up the thread if it was waiting for a free page.
448 */
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200449static void free_req(struct xen_blkif *blkif, struct pending_req *req)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400450{
451 unsigned long flags;
452 int was_empty;
453
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200454 spin_lock_irqsave(&blkif->pending_free_lock, flags);
455 was_empty = list_empty(&blkif->pending_free);
456 list_add(&req->free_list, &blkif->pending_free);
457 spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400458 if (was_empty)
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200459 wake_up(&blkif->pending_free_wq);
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -0400460}
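
/*
 * Illustrative sketch (not part of the driver): __do_block_io_op() below
 * pairs these helpers around each request; on an early failure the
 * pending_req must go back to the pool so that waiters are woken.
 */
#if 0
static void example_request(struct xen_blkif *blkif)
{
        struct pending_req *req = alloc_req(blkif);

        if (!req)
                return; /* out of requests; ring processing retries later */
        /* ... dispatch the request; the completion path calls free_req() ... */
        free_req(blkif, req);
}
#endif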

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, blkif->st_oo_req,
                blkif->st_rd_req, blkif->st_wr_req,
                blkif->st_f_req, blkif->st_ds_req,
                blkif->persistent_gnt_c,
                xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(blkif);
                if (ret > 0)
                        blkif->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(blkif->shutdown_wq,
                                                 kthread_should_stop());

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);

        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                        blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        int ret;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(blkif, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                        invcount = 0;
                }
        }
        if (invcount) {
                ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
                BUG_ON(ret);
                put_free_pages(blkif, unmap_pages, invcount);
        }
}

static int xen_blkbk_map(struct xen_blkif *blkif,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                pages[i]->gref);

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]->page))
                                goto out_of_memory;
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
                           pending_req->nr_pages,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif *blkif = pending_req->blkif;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment_aligned *segments = NULL;

        nseg = pending_req->nr_pages;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;
                pending_req->segments[n]->gref = segments[i].gref;
                seg[n].nsec = segments[i].last_sect -
                        segments[i].first_sect + 1;
                seg[n].offset = (segments[i].first_sect << 9);
                if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (segments[i].last_sect < segments[i].first_sect)) {
                        rc = -EINVAL;
                        goto unmap;
                }
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(blkif, pages, indirect_grefs);
        return rc;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, WRITE);
        if (err) {
                pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        blkif->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}
919
David Vrabel0e367ae2013-03-07 17:32:01 +0000920static int dispatch_other_io(struct xen_blkif *blkif,
921 struct blkif_request *req,
922 struct pending_req *pending_req)
923{
Roger Pau Monnebf0720c2013-04-17 20:18:59 +0200924 free_req(blkif, pending_req);
David Vrabel0e367ae2013-03-07 17:32:01 +0000925 make_response(blkif, req->u.other.id, req->operation,
926 BLKIF_RSP_EOPNOTSUPP);
927 return -EIO;
928}
929
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -0400930static void xen_blk_drain_io(struct xen_blkif *blkif)
931{
932 atomic_set(&blkif->drain, 1);
933 do {
Konrad Rzeszutek Wilk6927d922011-10-17 14:27:48 -0400934 /* The initial value is one, and one refcnt taken at the
935 * start of the xen_blkif_schedule thread. */
936 if (atomic_read(&blkif->refcnt) <= 2)
937 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -0400938 wait_for_completion_interruptible_timeout(
939 &blkif->drain_complete, HZ);
940
941 if (!atomic_read(&blkif->drain))
942 break;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -0400943 } while (!kthread_should_stop());
944 atomic_set(&blkif->drain, 0);
945}
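
/*
 * Note (descriptive, added for clarity): xen_blk_drain_io() backs the
 * WRITE_BARRIER handling in dispatch_rw_block_io(); it parks the dispatch
 * thread until in-flight requests have completed, i.e. until the refcount
 * drops back to the two references the driver itself holds, with
 * __end_block_io_op() signalling drain_complete as requests finish.
 */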

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                xen_blkbk_unmap(pending_req->blkif,
                                pending_req->segments,
                                pending_req->nr_pages);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                xen_blkif_put(pending_req->blkif);
                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
                        if (atomic_read(&pending_req->blkif->drain))
                                complete(&pending_req->blkif->drain_complete);
                }
                free_req(pending_req->blkif, pending_req);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}
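
/*
 * Note on the loop above (descriptive, added for clarity):
 * RING_FINAL_CHECK_FOR_REQUESTS() re-enables frontend notifications and
 * then re-reads req_prod, so a request that raced with the backend going
 * idle is either picked up here (more_to_do) or raises a fresh event
 * channel interrupt.
 */
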
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001103/*
Konrad Rzeszutek Wilk01f37f22011-05-11 15:57:09 -04001104 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
1105 * and call the 'submit_bio' to pass it to the underlying storage.
Konrad Rzeszutek Wilka1397fa2011-04-14 17:05:23 -04001106 */
Konrad Rzeszutek Wilk30fd1502011-05-12 16:47:48 -04001107static int dispatch_rw_block_io(struct xen_blkif *blkif,
1108 struct blkif_request *req,
1109 struct pending_req *pending_req)
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001110{
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001111 struct phys_req preq;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001112 struct seg_buf *seg = pending_req->seg;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001113 unsigned int nseg;
1114 struct bio *bio = NULL;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001115 struct bio **biolist = pending_req->biolist;
Konrad Rzeszutek Wilk1a95fe62011-04-15 11:35:13 -04001116 int i, nbio = 0;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001117 int operation;
Konrad Rzeszutek Wilka19be5f2011-04-27 12:40:11 -04001118 struct blk_plug plug;
Konrad Rzeszutek Wilk29bde092011-10-10 00:42:22 -04001119 bool drain = false;
Roger Pau Monnebb642e82013-05-02 10:21:17 +02001120 struct grant_page **pages = pending_req->segments;
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001121 unsigned short req_operation;
Konrad Rzeszutek Wilk4d05a282011-04-14 18:25:47 -04001122
Roger Pau Monne402b27f2013-04-18 16:06:54 +02001123 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1124 req->u.indirect.indirect_op : req->operation;
1125 if ((req->operation == BLKIF_OP_INDIRECT) &&
1126 (req_operation != BLKIF_OP_READ) &&
1127 (req_operation != BLKIF_OP_WRITE)) {
1128 pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1129 req_operation);
1130 goto fail_response;
1131 }
1132
	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through - a barrier is a drained flush */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
	}

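	/*
	 * A non-indirect request embeds its segments in the ring slot itself
	 * and so is limited to BLKIF_MAX_SEGMENTS_PER_REQUEST of them; an
	 * indirect request may reference up to MAX_INDIRECT_SEGMENTS.
	 */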
	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bios yet. */
		goto fail_response;
	}

	preq.nr_sects = 0;

	pending_req->blkif = blkif;
	pending_req->id = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_pages = nseg;

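	/*
	 * Segment bounds are expressed in 512-byte sectors; first_sect and
	 * last_sect index sectors within a single granted page, so offsets
	 * and lengths are obtained by shifting sector counts left by 9.
	 */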
	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev = req->u.rw.handle;
		preq.sector_number = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev = req->u.indirect.handle;
		preq.sector_number = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

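	/*
	 * xen_vbd_translate() checks that the request lies entirely within
	 * the virtual block device, rejects writes to a read-only vbd, and
	 * resolves preq to the backing bdev and real sector range.
	 */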
	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/Os and, once they have completed, issue
	 * the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * The corresponding xen_blkif_put is done in __end_block_io_op once
	 * all bios submitted for this request have completed.
	 */
	xen_blkif_get(blkif);

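	/*
	 * Pack as many segments as possible into each bio: a new bio is
	 * started whenever there is none yet or bio_add_page() refuses to
	 * take the next page (the bio is full or hit a device limit).
	 */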
	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_iter.bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
	}

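	/*
	 * pendcnt counts the bios in flight for this request; the last bio
	 * to complete drops the final reference in __end_block_io_op, which
	 * unmaps the grants and sends the response to the frontend.
	 */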
	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
			pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bios yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
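	/*
	 * Pretend a single bio completed with an error so that the normal
	 * completion path (__end_block_io_op) performs the unmap, sends the
	 * error response, and drops the references taken above.
	 */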
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring describing how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
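	/*
	 * Three protocol variants exist because 32-bit and 64-bit guests lay
	 * out the shared ring structures differently; the response is copied
	 * into the ring using whichever ABI the frontend negotiated.
	 */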
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");