/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
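/*
 * Usage sketch (the exact module name depends on how this file is built;
 * "blkback" below is illustrative, not guaranteed):
 *
 *   modprobe blkback reqs=128
 *
 * 'reqs' has permission 0, so it can only be set at load time; log_stats
 * and debug_lvl below stay switchable at run time through sysfs.
 */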

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t          *blkif;
	u64               id;
	int               nr_pages;
	atomic_t          pendcnt;
	unsigned short    operation;
	int               status;
	struct list_head  free_list;
} pending_req_t;

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

#define BLKBACK_INVALID_HANDLE (~0)

static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;

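/*
 * pending_pages is a flat pool of blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST
 * pre-allocated pages. The helpers below translate a (request, segment)
 * pair into its slot index, kernel virtual address, and grant handle.
 */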
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
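/*
 * alloc_req()/free_req() manage the fixed pool of pending_req_t entries.
 * alloc_req() may return NULL when the pool is exhausted; the scheduler
 * thread then re-queues the work and sleeps on pending_free_wq until
 * free_req() signals that an entry has been returned.
 */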
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pending_free_lock, flags);
	return req;
}

static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);
	if (was_empty)
		wake_up(&pending_free_wq);
}

static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}

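/*
 * Tear down the grant mappings of a completed request: every still-valid
 * segment page is unmapped with a single batched GNTTABOP_unmap_grant_ref
 * hypercall and its handle slot is marked free for reuse.
 */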
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	/* Reset the barrier count too, so all fields are per-interval. */
	blkif->st_br_req = 0;
}

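/*
 * Per-device scheduler thread: one instance runs for each connected
 * virtual block device. It sleeps until the frontend posts requests and
 * a pending_req is available, drains the ring via do_block_io_op(), and
 * periodically logs statistics when log_stats is set.
 */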
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

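/*
 * Common completion path. A barrier write rejected by the backing device
 * with -EOPNOTSUPP is reported to the frontend as BLKIF_RSP_EOPNOTSUPP
 * (and blkback_barrier() withdraws the barrier feature); any other error
 * fails the whole request. When the last bio of the request completes,
 * the grant mappings are torn down and the response is queued.
 */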
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

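/*
 * Drain the shared ring: for each new request, take a private copy
 * (translating the 32- or 64-bit frontend layout as needed), then
 * dispatch it. Returns nonzero if the loop stopped early and more work
 * remains for the scheduler thread.
 */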
static int do_block_io_op(blkif_t *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

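/*
 * Map the guest's grant references for each segment into our
 * pre-allocated page pool, run sanity and permission checks, then turn
 * the request into one or more bios submitted to the backing device.
 */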
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->seg[i].last_sect -
			req->seg[i].first_sect + 1;

		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->seg[i].last_sect < req->seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		/* Writes to disk only read the guest's pages, so map them
		 * read-only; reads from disk need a writable mapping. */
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		set_phys_to_machine(__pa(vaddr(
			pending_req, i)) >> PAGE_SHIFT,
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
		seg[i].buf = map[i].dev_bus_addr |
			(req->seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     virt_to_page(vaddr(pending_req, i)),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		/* A barrier with no segments still needs an empty bio. */
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

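/*
 * Queue a response on the appropriate ring (native or 32/64-bit
 * translated), push it, and kick the frontend via its event channel if
 * it asked for a notification. Also re-arms the scheduler thread when
 * the final check shows more requests arrived in the meantime.
 */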
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

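/*
 * Module init: size the shared pools (one page per possible in-flight
 * segment), wire up the interface and xenbus layers, and seed the free
 * list with every pending_req_t.
 */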
static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
					blkif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs || !pending_grant_handles || !pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;

	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	/* pending_reqs is a pointer: clear the array it points at, not
	 * sizeof(pending_reqs), which is only the pointer's own size. */
	memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");