/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

#include "hyperv.h"
#include "hyperv_storage.h"
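
/*
 * Allocate the storvsc-specific state for a new hv_device and link it
 * to the device via the ext pointer.
 */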
static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device)
		return NULL;

	stor_device->destroy = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	device->ext = stor_device;

	return stor_device;
}
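
/*
 * Get the storvsc state for inbound (receive) processing; returns NULL
 * once the device is being destroyed and no requests remain outstanding.
 */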
static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device = (struct storvsc_device *)device->ext;

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed, allow incoming
	 * traffic only to clean up outstanding requests.
	 */
	if (stor_device->destroy &&
	    (atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
	return stor_device;
}
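
/*
 * Run the vsc/vsp initialization handshake on the open channel:
 * begin-initialization, protocol version negotiation, a query of the
 * channel properties, and end-initialization, each as a synchronous
 * request/response exchange.
 */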
static int storvsc_channel_init(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct hv_storvsc_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* reuse the packet for version range supported */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
	FILL_VMSTOR_REVISION(vstor_packet->version.revision);

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->storage_channel_properties.port_number =
					stor_device->port_number;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
	stor_device->target_id
		= vstor_packet->storage_channel_properties.target_id;

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		ret = -EINVAL;

cleanup:
	return ret;
}
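
/*
 * Completion path for an I/O request: copy status and sense data back
 * into the original request, complete it, and wake up any waiter that
 * is draining outstanding requests.
 */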
static void storvsc_on_io_completion(struct hv_device *device,
				  struct vstor_packet *vstor_packet,
				  struct hv_storvsc_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *stor_pkt;

	stor_device = (struct storvsc_device *)device->ext;

	stor_pkt = &request->vstor_packet;

	/* Copy over the status and other relevant fields */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
	stor_pkt->vm_srb.sense_info_length =
		vstor_packet->vm_srb.sense_info_length;

	if (vstor_packet->vm_srb.scsi_status != 0 ||
	    vstor_packet->vm_srb.srb_status != 1) {
		DPRINT_WARN(STORVSC,
			    "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
			    stor_pkt->vm_srb.cdb[0],
			    vstor_packet->vm_srb.scsi_status,
			    vstor_packet->vm_srb.srb_status);
	}

	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
		/* CHECK_CONDITION */
		if (vstor_packet->vm_srb.srb_status & 0x80) {
			/* autosense data available */
			DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
				    "valid - len %d\n", request,
				    vstor_packet->vm_srb.sense_info_length);

			memcpy(request->sense_buffer,
			       vstor_packet->vm_srb.sense_data,
			       vstor_packet->vm_srb.sense_info_length);
		}
	}

	stor_pkt->vm_srb.data_transfer_length =
		vstor_packet->vm_srb.data_transfer_length;

	request->on_io_completion(request);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
	    stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);
}
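
/* Dispatch an inbound packet based on the requested operation. */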
static void storvsc_on_receive(struct hv_device *device,
			     struct vstor_packet *vstor_packet,
			     struct hv_storvsc_request *request)
{
	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(device, vstor_packet, request);
		break;
	case VSTOR_OPERATION_REMOVE_DEVICE:
		/* not handled; fall through */
	default:
		break;
	}
}
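
/*
 * Channel callback: drain the inbound ring buffer. Responses to the
 * init/reset requests complete the waiting thread; everything else is
 * handed to storvsc_on_receive().
 */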
static void storvsc_on_channel_callback(void *context)
{
	struct hv_device *device = (struct hv_device *)context;
	struct storvsc_device *stor_device;
	u32 bytes_recvd;
	u64 request_id;
	unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
	struct hv_storvsc_request *request;
	int ret;

	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	do {
		ret = vmbus_recvpacket(device->channel, packet,
				       ALIGN(sizeof(struct vstor_packet), 8),
				       &bytes_recvd, &request_id);
		if (ret == 0 && bytes_recvd > 0) {

			request = (struct hv_storvsc_request *)
					(unsigned long)request_id;

			if ((request == &stor_device->init_request) ||
			    (request == &stor_device->reset_request)) {

				memcpy(&request->vstor_packet, packet,
				       sizeof(struct vstor_packet));
				complete(&request->wait_event);
			} else {
				storvsc_on_receive(device,
						(struct vstor_packet *)packet,
						request);
			}
		} else {
			break;
		}
	} while (1);
}
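
/* Open the VMBus channel and run the initialization handshake. */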
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	/* Open the channel */
	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device);

	return ret;
}
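
/*
 * Add a new storage device: allocate the storvsc state, connect to the
 * VSP and report the path/target ids back through device_info.
 */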
int storvsc_dev_add(struct hv_device *device,
					void *additional_info)
{
	struct storvsc_device *stor_device;
	struct storvsc_device_info *device_info;
	int ret = 0;

	device_info = (struct storvsc_device_info *)additional_info;
	stor_device = alloc_stor_device(device);
	if (!stor_device)
		return -ENOMEM;

	/* Save the channel properties to our storvsc channel */

	/*
	 * If we were to support more than one SCSI channel, we would need
	 * to set the port number here to the SCSI channel; but there is no
	 * way to get at the SCSI channel prior to the bus scan.
	 *
	 * The host does not support this.
	 */
	stor_device->port_number = device_info->port_number;
	/* Send it back up */
	ret = storvsc_connect_to_vsp(device, device_info->ring_buffer_size);
	if (ret) {
		kfree(stor_device);
		return ret;
	}
	device_info->path_id = stor_device->path_id;
	device_info->target_id = stor_device->target_id;

	return ret;
}
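
/*
 * Tear down a storage device: mark it for destruction, wait for all
 * outstanding requests to drain, then close the channel and free the
 * storvsc state.
 */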
int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	unsigned long flags;

	stor_device = (struct storvsc_device *)device->ext;

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	stor_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device().
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	device->ext = NULL;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device);
	return 0;
}
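
/*
 * Send an I/O request to the VSP: requests with a multi-page data
 * buffer go out via vmbus_sendpacket_multipagebuffer(), everything
 * else as an inband packet.
 */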
int storvsc_do_io(struct hv_device *device,
			      struct hv_storvsc_request *request)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	int ret = 0;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;

	request->device = device;

	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);

	vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;

	vstor_packet->vm_srb.data_transfer_length =
		request->data_buffer.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->data_buffer.len) {
		ret = vmbus_sendpacket_multipagebuffer(device->channel,
				&request->data_buffer,
				vstor_packet,
				sizeof(struct vstor_packet),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}

/*
 * The channel properties uniquely specify how the device is to be
 * presented to the guest. Map this information for use by the block
 * driver. For Linux guests on Hyper-V, we emulate a SCSI HBA in the
 * guest (storvsc_drv) and so SCSI devices in the guest are handled by
 * native upper-level Linux drivers. Consequently, the Hyper-V
 * block driver, while being a generic block driver, presently does not
 * deal with anything other than devices that would need to be presented
 * to the guest as an IDE disk.
 *
 * This function maps the channel properties as embedded in the input
 * parameter device_info onto information necessary to register the
 * corresponding block device.
 *
 * Currently, there is no way to stop the emulation of the block device
 * on the host side. And so, to prevent the native IDE drivers in Linux
 * from taking over these devices (to be managed by the Hyper-V block
 * driver), we take over, if need be, the majors of the IDE controllers.
 */

int storvsc_get_major_info(struct storvsc_device_info *device_info,
			    struct storvsc_major_info *major_info)
{
	static bool ide0_registered;
	static bool ide1_registered;

	/*
	 * For now we only support IDE disks.
	 */
	major_info->devname = "ide";
	major_info->diskname = "hd";

	if (device_info->path_id) {
		major_info->major = 22;
		if (!ide1_registered) {
			major_info->do_register = true;
			ide1_registered = true;
		} else
			major_info->do_register = false;

		if (device_info->target_id)
			major_info->index = 3;
		else
			major_info->index = 2;
	} else {
		major_info->major = 3;
		if (!ide0_registered) {
			major_info->do_register = true;
			ide0_registered = true;
		} else
			major_info->do_register = false;

		if (device_info->target_id)
			major_info->index = 1;
		else
			major_info->index = 0;
	}

	return 0;
}

static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

struct hv_host_device {
	struct hv_device *dev;
	struct kmem_cache *request_pool;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_storvsc_request request;
};
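
/*
 * Derive the IDE target and path ids from the bytes of the device
 * instance id.
 */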
static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
{
	*target =
		dev->dev_instance.b[5] << 8 | dev->dev_instance.b[4];

	*path =
		dev->dev_instance.b[3] << 24 |
		dev->dev_instance.b[2] << 16 |
		dev->dev_instance.b[1] << 8  | dev->dev_instance.b[0];
}

static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * This enables luns to be located sparsely. Otherwise, we may not
	 * discover them.
	 */
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
	return 0;
}

static int storvsc_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	/* checking done by caller. */
	return bvec->bv_len;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	return 0;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}
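
/*
 * Check whether a scatter-gather list needs bouncing: return the index
 * of the first entry that leaves a hole (so the list cannot be mapped
 * as a contiguous run of pages), or -1 if no bouncing is needed.
 */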
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* A single entry can never leave a hole */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}
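
/*
 * Allocate a bounce scatter-gather list large enough to cover len
 * bytes; each entry gets a freshly allocated page.
 */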
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}

/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
							KM_IRQ0);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
			      KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

/* Assume bounce_sgl has enough room, i.e. it was sized by create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
				KM_IRQ0) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						KM_IRQ0);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

static int storvsc_remove(struct hv_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->device);
	struct hv_host_device *host_dev =
			(struct hv_host_device *)host->hostdata;

	scsi_remove_host(host);

	scsi_host_put(host);

	storvsc_dev_remove(dev);
	if (host_dev->request_pool) {
		kmem_cache_destroy(host_dev->request_pool);
		host_dev->request_pool = NULL;
	}
	return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;      /* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}

static int storvsc_host_reset(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 */

cleanup:
	return ret;
}

/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;

	return storvsc_host_reset(dev);
}

/*
 * storvsc_command_completion - Command completion processing
 */
static void storvsc_command_completion(struct hv_storvsc_request *request)
{
	struct storvsc_cmd_request *cmd_request =
		(struct storvsc_cmd_request *)request->context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	vm_srb = &request->vstor_packet.vm_srb;
	if (cmd_request->bounce_sgl_count) {
		if (vm_srb->data_in == READ_TYPE)
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));
		/* free the bounce buffer regardless of data direction */
		destroy_bounce_buffer(cmd_request->bounce_sgl,
				cmd_request->bounce_sgl_count);
	}

	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	scsi_set_resid(scmnd,
		request->data_buffer.len -
		vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	scsi_done_fn(scmnd);

	kmem_cache_free(host_dev->request_pool, cmd_request);
}

/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
				void (*done)(struct scsi_cmnd *))
{
	int ret;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;
	struct hv_storvsc_request *request;
	struct storvsc_cmd_request *cmd_request;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;

	/* If retrying, no need to prep the cmd */
	if (scmnd->host_scribble) {
		cmd_request =
			(struct storvsc_cmd_request *)scmnd->host_scribble;

		goto retry_request;
	}

	scmnd->scsi_done = done;

	cmd_request = kmem_cache_zalloc(host_dev->request_pool,
				       GFP_ATOMIC);
	if (!cmd_request) {
		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Setup the cmd request */
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	request = &cmd_request->request;
	vm_srb = &request->vstor_packet.vm_srb;

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}

	request->on_io_completion = storvsc_command_completion;
	request->context = cmd_request;

	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	request->sense_buffer = scmnd->sense_buffer;

	request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd));
			if (!cmd_request->bounce_sgl) {
				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_dev->request_pool,
						cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			if (vm_srb->data_in == WRITE_TYPE)
				copy_to_bounce_buffer(sgl,
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

retry_request:
	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, &cmd_request->request);

	if (ret == -EAGAIN) {
		/* no more space */

		if (cmd_request->bounce_sgl_count) {
			/*
			 * FIXME: We can optimize on writes by just skipping
			 * this
			 */
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_dev->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	return ret;
}

static DEF_SCSI_QCMD(storvsc_queuecommand)

/* Scsi driver */
static struct scsi_host_template scsi_driver = {
	.module			= THIS_MODULE,
	.name			= "storvsc_host_t",
	.bios_param		= storvsc_get_chs,
	.queuecommand		= storvsc_queuecommand,
	.eh_host_reset_handler	= storvsc_host_reset_handler,
	.slave_alloc		= storvsc_device_alloc,
	.slave_configure	= storvsc_device_configure,
	.cmd_per_lun		= 1,
	/* 64 max_queue * 1 target */
	.can_queue		= STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id		= -1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize		= MAX_MULTIPAGE_BUFFER_COUNT,
	/*
	 * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs
	 * to merge into one sg element. If set, we must limit the
	 * max_segment_size to PAGE_SIZE, otherwise we may get one sg element
	 * that represents multiple physically contiguous pfns
	 * (ie sg[x].length > PAGE_SIZE).
	 */
	.use_clustering		= ENABLE_CLUSTERING,
	/* Make sure we don't get a sg segment that crosses a page boundary */
	.dma_boundary		= PAGE_SIZE-1,
};

/*
 * The storvsc_probe function assumes that the IDE guid
 * is the second entry.
 */
static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) },
	/* IDE guid */
	{ VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/*
 * storvsc_probe - Add a new device for this driver
 */
static int storvsc_probe(struct hv_device *device)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	struct storvsc_device_info device_info;
	bool dev_is_ide;
	int path = 0;
	int target = 0;

	if (!memcmp(&device->dev_type.b, id_table[1].guid, sizeof(uuid_le)))
		dev_is_ide = true;
	else
		dev_is_ide = false;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	dev_set_drvdata(&device->device, host);

	host_dev = (struct hv_host_device *)host->hostdata;
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;

	host_dev->request_pool =
				kmem_cache_create(dev_name(&device->device),
					sizeof(struct storvsc_cmd_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);

	if (!host_dev->request_pool) {
		scsi_host_put(host);
		return -ENOMEM;
	}

	device_info.port_number = host->host_no;
	device_info.ring_buffer_size = storvsc_ringbuffer_size;
	/* Call to the vsc driver to add the device */
	ret = storvsc_dev_add(device, (void *)&device_info);

	if (ret != 0) {
		kmem_cache_destroy(host_dev->request_pool);
		scsi_host_put(host);
		return -ENODEV;
	}

	if (dev_is_ide)
		storvsc_get_ide_info(device, &target, &path);

	host_dev->path = device_info.path_id;
	host_dev->target = device_info.target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out;

	if (!dev_is_ide) {
		scsi_scan_host(host);
		return 0;
	}
	ret = scsi_add_device(host, 0, target, 0);
	if (ret) {
		scsi_remove_host(host);
		goto err_out;
	}
	return 0;

err_out:
	storvsc_dev_remove(device);
	kmem_cache_destroy(host_dev->request_pool);
	scsi_host_put(host);
	return -ENODEV;
}

/* The one and only one */
static struct hv_driver storvsc_drv = {
	.name = "storvsc",
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		sizeof(struct vstor_packet) + sizeof(u64),
		sizeof(u64)));

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -EINVAL;

	return vmbus_driver_register(&storvsc_drv);
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);