/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */

#define KERNEL_2_6_27

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>

#ifdef KERNEL_2_6_5
#else
#include <scsi/scsi_dbg.h>
#endif

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"

//
// #defines
//

//
// Data types
//
struct host_device_context {
	struct work_struct host_rescan_work;	//must be 1st field
	struct device_context *device_ctx;	// point back to our device context
#ifdef KERNEL_2_6_27
	struct kmem_cache *request_pool;
#else
	kmem_cache_t *request_pool;
#endif
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	STORVSC_REQUEST request;
	// !!!DO NOT ADD ANYTHING BELOW HERE!!!
	// The extension buffer falls right here and is pointed to by request.Extension;
};

struct storvsc_driver_context {
	// !! These must be the first 2 fields !!
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};

// Static decl
static int storvsc_probe(struct device *dev);
static int storvsc_queuecommand(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *));
static int storvsc_device_alloc(struct scsi_device *);
static int storvsc_device_configure(struct scsi_device *);
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);
#ifdef KERNEL_2_6_27
static void storvsc_host_rescan_callback(struct work_struct *work);
#else
static void storvsc_host_rescan_callback(void* context);
#endif
static void storvsc_host_rescan(DEVICE_OBJECT* device_obj);
static int storvsc_remove(struct device *dev);

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count, unsigned int len);
static void destroy_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count);
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count);

static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[], unsigned int *lun_count);
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info);


static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

// The one and only one
static struct storvsc_driver_context g_storvsc_drv;

// Scsi driver
static struct scsi_host_template scsi_driver = {
	.module = THIS_MODULE,
	.name = "storvsc_host_t",
	.bios_param = storvsc_get_chs,
	.queuecommand = storvsc_queuecommand,
	.eh_host_reset_handler = storvsc_host_reset_handler,
	.slave_alloc = storvsc_device_alloc,
	.slave_configure = storvsc_device_configure,
	.cmd_per_lun = 1,
	.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, // 64 max_queue * 1 target
	.this_id = -1,
	// no use setting to 0 since ll_blk_rw resets it to 1
	.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT, // currently 32
	// ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs to merge into 1 sg element. If set, we must
	// limit the max_segment_size to PAGE_SIZE, otherwise we may get 1 sg element that represents multiple
	// physically contiguous pfns (ie sg[x].length > PAGE_SIZE).
	.use_clustering = ENABLE_CLUSTERING,
	// Make sure we don't get an sg segment that crosses a page boundary
	.dma_boundary = PAGE_SIZE-1,
};


/*++

Name: storvsc_drv_init()

Desc: StorVsc driver initialization.

--*/
int storvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret=0;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx=&g_storvsc_drv.drv_ctx;

	DPRINT_ENTER(STORVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
	storvsc_drv_obj->OnHostRescan = storvsc_host_rescan;

	// Callback to client driver to complete the initialization
	pfn_drv_init(&storvsc_drv_obj->Base);

	DPRINT_INFO(STORVSC_DRV, "request extension size %u, max outstanding reqs %u", storvsc_drv_obj->RequestExtSize, storvsc_drv_obj->MaxOutstandingRequestsPerChannel);

	if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel < STORVSC_MAX_IO_REQUESTS)
	{
		DPRINT_ERR(STORVSC_DRV, "The number of outstanding io requests (%d) is larger than that supported (%d) internally.",
			STORVSC_MAX_IO_REQUESTS, storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
		return -1;
	}

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
	drv_ctx->driver.probe = storvsc_probe;
	drv_ctx->driver.remove = storvsc_remove;
#else
	drv_ctx->probe = storvsc_probe;
	drv_ctx->remove = storvsc_remove;
#endif

	// The driver belongs to vmbus
	vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1; // stop iterating
}

/*++

Name: storvsc_drv_exit()

Desc:

--*/
void storvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_storvsc_drv.drv_obj;
	struct driver_context *drv_ctx=&g_storvsc_drv.drv_ctx;

	struct device *current_dev=NULL;

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#define driver_for_each_device(drv, start, data, fn) \
	struct list_head *ptr, *n; \
	list_for_each_safe(ptr, n, &((drv)->devices)) {\
		struct device *curr_dev;\
		curr_dev = list_entry(ptr, struct device, driver_list);\
		fn(curr_dev, data);\
	}
#endif // KERNEL_2_6_9

	DPRINT_ENTER(STORVSC_DRV);

	while (1)
	{
		current_dev = NULL;

		// Get the device
		driver_for_each_device(&drv_ctx->driver, NULL, (void*)&current_dev, storvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		// Initiate removal from the top-down
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(STORVSC_DRV);

	return;
}

/*++

Name: storvsc_probe()

Desc: Add a new device for this driver

--*/
static int storvsc_probe(struct device *device)
{
	int ret=0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT* device_obj = &device_ctx->device_obj;

	struct Scsi_Host *host;
	struct host_device_context *host_device_ctx;
	STORVSC_DEVICE_INFO device_info;

	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceAdd)
		return -1;

	host = scsi_host_alloc(&scsi_driver, sizeof(struct host_device_context));
	if (!host)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object");
		return -ENOMEM;
	}

	dev_set_drvdata(device, host);

	host_device_ctx = (struct host_device_context*)host->hostdata;
	memset(host_device_ctx, 0, sizeof(struct host_device_context));

	host_device_ctx->port = host->host_no;
	host_device_ctx->device_ctx = device_ctx;

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#elif defined(KERNEL_2_6_27)
	INIT_WORK(&host_device_ctx->host_rescan_work, storvsc_host_rescan_callback);
#else
	INIT_WORK(&host_device_ctx->host_rescan_work, storvsc_host_rescan_callback, device_obj);
#endif

#if defined(KERNEL_2_6_27)
	host_device_ctx->request_pool =
		kmem_cache_create(dev_name(&device_ctx->device),
			sizeof(struct storvsc_cmd_request) + storvsc_drv_obj->RequestExtSize,
			0,
			SLAB_HWCACHE_ALIGN, NULL);
#else
	host_device_ctx->request_pool =
		kmem_cache_create(device_ctx->device.bus_id,
			sizeof(struct storvsc_cmd_request) + storvsc_drv_obj->RequestExtSize,
			0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif

	if (!host_device_ctx->request_pool)
	{
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -ENOMEM;
	}

	device_info.PortNumber = host->host_no;
	// Call to the vsc driver to add the device
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, (void*)&device_info);
	if (ret != 0)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	//host_device_ctx->port = device_info.PortNumber;
	host_device_ctx->path = device_info.PathId;
	host_device_ctx->target = device_info.TargetId;

	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;	// max # of devices per target
	host->max_id = STORVSC_MAX_TARGETS;		// max # of targets per channel
	host->max_channel = STORVSC_MAX_CHANNELS - 1;	// max # of channels

	// Register the HBA and start the scsi bus scan
	ret = scsi_add_host(host, device);
	if (ret != 0)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");

		storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

		kmem_cache_destroy(host_device_ctx->request_pool);
		scsi_host_put(host);
		DPRINT_EXIT(STORVSC_DRV);

		return -1;
	}

	scsi_scan_host(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}


/*++

Name: storvsc_remove()

Desc: Callback when our device is removed

--*/
static int storvsc_remove(struct device *device)
{
	int ret=0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT* device_obj = &device_ctx->device_obj;

	struct Scsi_Host *host = dev_get_drvdata(device);
	struct host_device_context *host_device_ctx=(struct host_device_context*)host->hostdata;


	DPRINT_ENTER(STORVSC_DRV);

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
	{
		DPRINT_EXIT(STORVSC_DRV);
		return -1;
	}

	// Call to the vsc driver to let it know that the device is being removed
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
	{
		// TODO:
		DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)", ret);
	}

	if (host_device_ctx->request_pool)
	{
		kmem_cache_destroy(host_device_ctx->request_pool);
		host_device_ctx->request_pool = NULL;
	}

	DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
	scsi_remove_host(host);

	DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
	scsi_host_put(host);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

/*++

Name: storvsc_commmand_completion()

Desc: Command completion processing

--*/
static void storvsc_commmand_completion(STORVSC_REQUEST* request)
{
	struct storvsc_cmd_request *cmd_request = (struct storvsc_cmd_request*)request->Context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#else
	struct scsi_sense_hdr sense_hdr;
#endif

	ASSERT(request == &cmd_request->request);
	ASSERT((unsigned long)scmnd->host_scribble == (unsigned long)cmd_request);
	ASSERT(scmnd);
	ASSERT(scmnd->scsi_done);

	DPRINT_ENTER(STORVSC_DRV);

	if (cmd_request->bounce_sgl_count) // using bounce buffer
	{
		//printk("copy_from_bounce_buffer\n");

		// FIXME: We can optimize on writes by just skipping this
#ifdef KERNEL_2_6_27
		copy_from_bounce_buffer(scsi_sglist(scmnd), cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
		copy_from_bounce_buffer(scmnd->request_buffer, cmd_request->bounce_sgl, scmnd->use_sg);
#endif
		destroy_bounce_buffer(cmd_request->bounce_sgl, cmd_request->bounce_sgl_count);
	}

	scmnd->result = request->Status;

	if (scmnd->result)
	{
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
		DPRINT_INFO(STORVSC_DRV, "scsi result nonzero - %d", scmnd->result);
#else
		if (scsi_normalize_sense(scmnd->sense_buffer, request->SenseBufferSize, &sense_hdr))
		{
			scsi_print_sense_hdr("storvsc", &sense_hdr);
		}
#endif
	}

	ASSERT(request->BytesXfer <= request->DataBuffer.Length);
#ifdef KERNEL_2_6_27
	scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
#else
	scmnd->resid = request->DataBuffer.Length - request->BytesXfer;
#endif

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	// !!DO NOT MODIFY the scmnd after this call
	scsi_done_fn(scmnd);

	kmem_cache_free(host_device_ctx->request_pool, cmd_request);

	DPRINT_EXIT(STORVSC_DRV);
}

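//
// do_bounce_buffer() decides whether a scatterlist can be handed to the host
// as-is. Since the request's DataBuffer is described by a single offset plus a
// PFN array (see storvsc_queuecommand()), every sg entry except the first must
// start at offset 0 and every entry except the last must end on a page
// boundary. It returns -1 when the list already satisfies this (no bounce
// needed), otherwise the index of the first entry that leaves a hole.
//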
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i=0;

	// No need to check
	if (sg_count < 2)
		return -1;

	// We have at least 2 sg entries
	for (i=0; i<sg_count; i++)
	{
		if (i == 0) // make sure 1st one does not have hole
		{
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		}
		else if (i == sg_count - 1) // make sure last one does not have hole
		{
			if (sgl[i].offset != 0)
				return i;
		}
		else // make sure no hole in the middle
		{
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
			{
				return i;
			}
		}
	}
	return -1;
}

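//
// create_bounce_buffer() allocates one page-sized sg entry per page of 'len'.
// Each entry starts out with offset 0 and length 0; copy_to_bounce_buffer()
// grows the lengths as data is copied in, so the bounce list always reflects
// how much data it actually holds.
//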
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count, unsigned int len)
{
	int i;
	int num_pages=0;
	struct scatterlist* bounce_sgl;
	struct page *page_buf;

	num_pages = ALIGN_UP(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kzalloc(num_pages * sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
	{
		return NULL;
	}

	for (i=0; i<num_pages; i++)
	{
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
		{
			goto cleanup;
		}
#ifdef KERNEL_2_6_27
		sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
#else
		bounce_sgl[i].page = page_buf;
		bounce_sgl[i].offset = 0;
		bounce_sgl[i].length = 0;
#endif
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}

static void destroy_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i=0; i<sg_count; i++)
	{
#ifdef KERNEL_2_6_27
		if ((page_buf = sg_page((&sgl[i]))) != NULL)
#else
		if ((page_buf = sgl[i].page) != NULL)
#endif
		{
			__free_page(page_buf);
		}
	}

	kfree(sgl);
}

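//
// The two copy helpers below run with local interrupts disabled because they
// use the KM_IRQ0 kmap_atomic() slot; masking interrupts keeps an interrupt
// handler on the same CPU from reusing that slot while a page is still mapped.
//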
// Assume the bounce_sgl has enough room ie using the create_bounce_buffer()
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count)
{
	int i=0,j=0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied=0;
	unsigned long bounce_addr=0;
	unsigned long src_addr=0;
	unsigned long flags;

	local_irq_save(flags);

	for (i=0; i<orig_sgl_count; i++)
	{
#ifdef KERNEL_2_6_27
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), KM_IRQ0) + orig_sgl[i].offset;
#else
		src_addr = (unsigned long)kmap_atomic(orig_sgl[i].page, KM_IRQ0) + orig_sgl[i].offset;
#endif
		src = src_addr;
		srclen = orig_sgl[i].length;

		//if (PageHighMem(orig_sgl[i].page))
		//	printk("HighMem page detected - addr %p", (void*)src);

		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		if (j == 0)
		{
#ifdef KERNEL_2_6_27
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
			bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
		}

		while (srclen)
		{
			// assume bounce offset always == 0
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void*)dest, (void*)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) // full..move to next entry
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
				j++;

				// if we need to use another bounce buffer
				if (srclen || i != orig_sgl_count - 1)
				{
#ifdef KERNEL_2_6_27
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
					bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
				}
			}
			else if (srclen == 0 && i == orig_sgl_count - 1) // unmap the last bounce that is < PAGE_SIZE
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void*)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}

// Assume the original sgl has enough room
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, struct scatterlist *bounce_sgl, unsigned int orig_sgl_count)
{
	int i=0,j=0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied=0;
	unsigned long bounce_addr=0;
	unsigned long dest_addr=0;
	unsigned long flags;

	local_irq_save(flags);

	for (i=0; i<orig_sgl_count; i++)
	{
#ifdef KERNEL_2_6_27
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), KM_IRQ0) + orig_sgl[i].offset;
#else
		dest_addr = (unsigned long)kmap_atomic(orig_sgl[i].page, KM_IRQ0) + orig_sgl[i].offset;
#endif
		dest = dest_addr;
		destlen = orig_sgl[i].length;
		ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);

		if (j == 0)
		{
#ifdef KERNEL_2_6_27
			bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
			bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
		}

		while (destlen)
		{
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void*)dest, (void*)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) // full
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
				j++;

				// if we need to use another bounce buffer
				if (destlen || i != orig_sgl_count - 1)
				{
#ifdef KERNEL_2_6_27
					bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
#else
					bounce_addr = (unsigned long)kmap_atomic(bounce_sgl[j].page, KM_IRQ0);
#endif
				}
			}
			else if (destlen == 0 && i == orig_sgl_count - 1) // unmap the last bounce that is < PAGE_SIZE
			{
				kunmap_atomic((void*)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void*)(dest_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}


/*++

Name: storvsc_queuecommand()

Desc: Initiate command processing

--*/
static int storvsc_queuecommand(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *))
{
	int ret=0;
	struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
	struct device_context *device_ctx=host_device_ctx->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;
	STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	STORVSC_REQUEST *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size=0;
	int i;
	struct scatterlist *sgl;

	DPRINT_ENTER(STORVSC_DRV);

#ifdef KERNEL_2_6_27
	DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d queue depth %d tagged %d",
		scmnd,
		scmnd->sc_data_direction,
		scsi_sg_count(scmnd),
		scsi_sglist(scmnd),
		scsi_bufflen(scmnd),
		scmnd->device->queue_depth,
		scmnd->device->tagged_supported);
#else
	DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d queue depth %d tagged %d",
		scmnd,
		scmnd->sc_data_direction,
		scmnd->use_sg,
		scmnd->request_buffer,
		scmnd->request_bufflen,
		scmnd->device->queue_depth,
		scmnd->device->tagged_supported);
#endif

	// If retrying, no need to prep the cmd
	if (scmnd->host_scribble)
	{
		ASSERT(scmnd->scsi_done != NULL);

		cmd_request = (struct storvsc_cmd_request* )scmnd->host_scribble;
		DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p", scmnd, cmd_request);

		goto retry_request;
	}

	ASSERT(scmnd->scsi_done == NULL);
	ASSERT(scmnd->host_scribble == NULL);

	scmnd->scsi_done = done;

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = kmem_cache_alloc(host_device_ctx->request_pool, GFP_ATOMIC);
	if (!cmd_request)
	{
		DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate storvsc_cmd_request...marking queue busy", scmnd);

		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	// Setup the cmd request
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char*)cmd_request;

	request = &cmd_request->request;

	request->Extension = (void*)((unsigned long)cmd_request + request_size);
	DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size, storvsc_drv_obj->RequestExtSize);

	// Build the SRB
	switch (scmnd->sc_data_direction)
	{
	case DMA_TO_DEVICE:
		request->Type = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		request->Type = READ_TYPE;
		break;
	default:
		request->Type = UNKNOWN_TYPE;
		break;
	}

	request->OnIOCompletion = storvsc_commmand_completion;
	request->Context = cmd_request; //scmnd;

	//request->PortId = scmnd->device->channel;
	request->Host = host_device_ctx->port;
	request->Bus = scmnd->device->channel;
	request->TargetId = scmnd->device->id;
	request->LunId = scmnd->device->lun;

	ASSERT(scmnd->cmd_len <= 16);
	request->CdbLen = scmnd->cmd_len;
	request->Cdb = scmnd->cmnd;

	request->SenseBuffer = scmnd->sense_buffer;
	request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;


#ifdef KERNEL_2_6_27
	request->DataBuffer.Length = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd))
#else
	request->DataBuffer.Length = scmnd->request_bufflen;
	if (scmnd->use_sg)
#endif
	{
#ifdef KERNEL_2_6_27
		sgl = (struct scatterlist*)scsi_sglist(scmnd);
#else
		sgl = (struct scatterlist*)(scmnd->request_buffer);
#endif

		// check if we need to bounce the sgl
#ifdef KERNEL_2_6_27
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1)
#else
		if (do_bounce_buffer(sgl, scmnd->use_sg) != -1)
#endif
		{
			DPRINT_INFO(STORVSC_DRV, "need to bounce buffer for this scmnd %p", scmnd);
#ifdef KERNEL_2_6_27
			cmd_request->bounce_sgl = create_bounce_buffer(sgl, scsi_sg_count(scmnd), scsi_bufflen(scmnd));
#else
			cmd_request->bounce_sgl = create_bounce_buffer(
				sgl,
				scmnd->use_sg, scmnd->request_bufflen);
#endif
			if (!cmd_request->bounce_sgl)
			{
				DPRINT_ERR(STORVSC_DRV, "unable to create bounce buffer for this scmnd %p", scmnd);

				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_device_ctx->request_pool, cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

#ifdef KERNEL_2_6_27
			cmd_request->bounce_sgl_count = ALIGN_UP(scsi_bufflen(scmnd), PAGE_SIZE) >> PAGE_SHIFT;
#else
			cmd_request->bounce_sgl_count = ALIGN_UP(scmnd->request_bufflen, PAGE_SIZE) >> PAGE_SHIFT;
#endif

			//printk("bouncing buffer allocated %p original buffer %p\n", bounce_sgl, sgl);
			//printk("copy_to_bounce_buffer\n");
			// FIXME: We can optimize on reads by just skipping this
#ifdef KERNEL_2_6_27
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl, scmnd->use_sg);
#endif

			sgl = cmd_request->bounce_sgl;
		}

		request->DataBuffer.Offset = sgl[0].offset;

#ifdef KERNEL_2_6_27
		for (i = 0; i < scsi_sg_count(scmnd); i++)
#else
		for (i = 0; i < scmnd->use_sg; i++)
#endif
		{
			DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", i, sgl[i].length, sgl[i].offset);
#ifdef KERNEL_2_6_27
			request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page((&sgl[i])));
#else
			request->DataBuffer.PfnArray[i] = page_to_pfn(sgl[i].page);
#endif
		}
	}

#ifdef KERNEL_2_6_27
	else if (scsi_sglist(scmnd))
	{
		ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE);
		request->DataBuffer.Offset = virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->DataBuffer.PfnArray[0] = virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}
	else
	{
		ASSERT(scsi_bufflen(scmnd) == 0);
	}
#else
	else if (scmnd->request_buffer)
	{
		ASSERT(scmnd->request_bufflen <= PAGE_SIZE);
		request->DataBuffer.Offset = virt_to_phys(scmnd->request_buffer) & (PAGE_SIZE-1);
		request->DataBuffer.PfnArray[0] = virt_to_phys(scmnd->request_buffer) >> PAGE_SHIFT;
	}
	else
	{
		ASSERT(scmnd->request_bufflen == 0);
	}
#endif

retry_request:

	// Invokes the vsc to start an IO
	ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj, &cmd_request->request);
	if (ret == -1) // no more space
	{
		DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - queue FULL...marking queue busy", scmnd);

		if (cmd_request->bounce_sgl_count)
		{
			// FIXME: We can optimize on writes by just skipping this
#ifdef KERNEL_2_6_27
			copy_from_bounce_buffer(scsi_sglist(scmnd), cmd_request->bounce_sgl, scsi_sg_count(scmnd));
#else
			copy_from_bounce_buffer(
				scmnd->request_buffer,
				cmd_request->bounce_sgl,
				scmnd->use_sg);
#endif
			destroy_bounce_buffer(cmd_request->bounce_sgl, cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_device_ctx->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

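//
// storvsc_merge_bvec() is hooked up via blk_queue_merge_bvec() in
// storvsc_device_configure(). On the older kernels it rejects bio_vecs that
// would leave a hole in the middle of a multi-page bio, so that most requests
// can be described directly by the PFN array without taking the bounce-buffer
// path above.
//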
#ifdef KERNEL_2_6_27
static int storvsc_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	return bvec->bv_len; // checking done by caller.
}
#else
static int storvsc_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
{
	// Check if we are adding a new bvec
	if (bio->bi_vcnt > 0)
	{
		//printk("storvsc_merge_bvec() - cnt %u offset %u len %u\n", bio->bi_vcnt, bvec->bv_offset, bvec->bv_len);

		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec == prev)
			return bvec->bv_len; // success

		// Adding new bvec. Make sure the prev one is a complete page
		if (prev->bv_len == PAGE_SIZE && prev->bv_offset == 0)
		{
			return bvec->bv_len; // success
		}
		else
		{
			// Don't reject if the new bvec starts right where the prev one ends, since
			// they will be merged into 1 bvec or blk_rq_map_sg() will merge them into 1 sg element
			if ((bvec->bv_page == prev->bv_page) &&
				(bvec->bv_offset == prev->bv_offset + prev->bv_len))
			{
				return bvec->bv_len; // success
			}
			else
			{
				DPRINT_INFO(STORVSC_DRV, "detected holes in bio request (%p) - cnt %u offset %u len %u", bio, bio->bi_vcnt, bvec->bv_offset, bvec->bv_len);
				return 0; // don't add the bvec to this bio since we don't allow holes in the middle of a multi-page bio
			}
		}
	}

	return bvec->bv_len; // success

}

#endif

/*++

Name: storvsc_device_configure()

Desc: Configure the specified scsi device

--*/
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
#ifdef KERNEL_2_6_5
#else
	DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d", sdevice, BLIST_SPARSELUN);
	// This enables luns to be located sparsely. Otherwise, we may not discover them.
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
#endif
	return 0;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice, sdevice->queue_depth);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d", sdevice, STORVSC_MAX_IO_REQUESTS);
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG, STORVSC_MAX_IO_REQUESTS);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld", sdevice, PAGE_SIZE);
	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine", sdevice);
	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
	//sdevice->timeout = (2000 * HZ);//(75 * HZ);

	return 0;
}

/*++

Name: storvsc_host_reset_handler()

Desc: Reset the scsi HBA

--*/
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	int ret=SUCCESS;
	struct host_device_context *host_device_ctx = (struct host_device_context*)scmnd->device->host->hostdata;
	struct device_context *device_ctx = host_device_ctx->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct storvsc_driver_context *storvsc_drv_ctx = (struct storvsc_driver_context*)driver_ctx;

	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &storvsc_drv_ctx->drv_obj;

	DPRINT_ENTER(STORVSC_DRV);

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...", scmnd->device, &device_ctx->device_obj);

	// Invokes the vsc to reset the host/bus
	ASSERT(storvsc_drv_obj->OnHostReset);
	ret = storvsc_drv_obj->OnHostReset(&device_ctx->device_obj);
	if (ret != 0)
	{
		DPRINT_EXIT(STORVSC_DRV);
		return ret;
	}

	DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset completed", scmnd->device, &device_ctx->device_obj);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

/*++

Name: storvsc_host_rescan

Desc: Rescan the scsi HBA

--*/
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#else

#ifdef KERNEL_2_6_27
static void storvsc_host_rescan_callback(struct work_struct *work)
{
	DEVICE_OBJECT* device_obj =
		&((struct host_device_context*)work)->device_ctx->device_obj;
#else
static void storvsc_host_rescan_callback(void* context)
{
	DEVICE_OBJECT* device_obj = (DEVICE_OBJECT*)context;
#endif
	struct device_context* device_ctx = to_device_context(device_obj);
	struct Scsi_Host *host = dev_get_drvdata(&device_ctx->device);
	struct scsi_device *sdev;
	struct host_device_context *host_device_ctx;
	struct scsi_device **sdevs_remove_list;
	unsigned int sdevs_count=0;
	unsigned int found;
	unsigned int i;
	unsigned int lun_count=0;
	unsigned int *lun_list;

	DPRINT_ENTER(STORVSC_DRV);

	host_device_ctx = (struct host_device_context*)host->hostdata;
	lun_list = kzalloc(sizeof(unsigned int)*STORVSC_MAX_LUNS_PER_TARGET, GFP_ATOMIC);
	if (!lun_list)
	{
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun list");
		return;
	}

	sdevs_remove_list = kzalloc(sizeof(void*)*STORVSC_MAX_LUNS_PER_TARGET, GFP_ATOMIC);
	if (!sdevs_remove_list)
	{
		kfree(lun_list);
		DPRINT_ERR(STORVSC_DRV, "unable to allocate lun remove list");
		return;
	}

	DPRINT_INFO(STORVSC_DRV, "rescanning host for new scsi devices on dev obj (%p) target (%u) bus (%u)...", device_obj, host_device_ctx->target, host_device_ctx->path);

	// Rescan for new devices
	scsi_scan_target(&host->shost_gendev, host_device_ctx->path, host_device_ctx->target, SCAN_WILD_CARD, 1);

	DPRINT_INFO(STORVSC_DRV, "rescanning host for removed scsi devices...");

	// Use the 1st device to send the report luns cmd
	shost_for_each_device(sdev, host)
	{
		lun_count=STORVSC_MAX_LUNS_PER_TARGET;
		storvsc_report_luns(sdev, lun_list, &lun_count);

		DPRINT_INFO(STORVSC_DRV, "report luns on scsi device (%p) found %u luns ", sdev, lun_count);
		DPRINT_INFO(STORVSC_DRV, "existing luns on scsi device (%p) host (%d)", sdev, host->host_no);

		scsi_device_put(sdev);
		break;
	}

	for (i=0; i<lun_count; i++)
	{
		DPRINT_INFO(STORVSC_DRV, "%d) lun %u", i, lun_list[i]);
	}

	// Rescan for devices that may have been removed.
	// We do not have to worry that new devices may have been added since
	// this callback is serialized by the workqueue, ie add/remove are done here.
	shost_for_each_device(sdev, host)
	{
		// See if this device is still here
		found = 0;
		for (i=0; i<lun_count; i++)
		{
			if (sdev->lun == lun_list[i])
			{
				found = 1;
				break;
			}
		}
		if (!found)
		{
			DPRINT_INFO(STORVSC_DRV, "lun (%u) does not exist", sdev->lun);
			sdevs_remove_list[sdevs_count++] = sdev;
		}
	}

	// Now remove the devices
	for (i=0; i< sdevs_count; i++)
	{
		DPRINT_INFO(STORVSC_DRV, "removing scsi device (%p) lun (%u)...",
			sdevs_remove_list[i], sdevs_remove_list[i]->lun);

		// make sure it is not removed from underneath us
		if (!scsi_device_get(sdevs_remove_list[i]))
		{
			scsi_remove_device(sdevs_remove_list[i]);
			scsi_device_put(sdevs_remove_list[i]);
		}
	}

	DPRINT_INFO(STORVSC_DRV, "rescan completed on dev obj (%p) target (%u) bus (%u)", device_obj, host_device_ctx->target, host_device_ctx->path);

	kfree(lun_list);
	kfree(sdevs_remove_list);

	DPRINT_EXIT(STORVSC_DRV);
}

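//
// storvsc_report_luns() issues a REPORT LUNS command through scsi_execute_req()
// and flattens the response for the caller. The response is an 8-byte header
// whose first 4 bytes give the LUN list length, followed by 8-byte LUN
// descriptors; the inner loop below folds each descriptor's 2-byte address
// fields into a single unsigned int so it can be compared against sdev->lun.
//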
static int storvsc_report_luns(struct scsi_device *sdev, unsigned int luns[], unsigned int *lun_count)
{
	int i,j;
	unsigned int lun=0;
	unsigned int num_luns;
	int result;
	unsigned char *data;
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#else
	struct scsi_sense_hdr sshdr;
#endif
	unsigned char cmd[16]={0};
	unsigned int report_len = 8*(STORVSC_MAX_LUNS_PER_TARGET+1); // Add 1 to cover the report_lun header
	unsigned long long *report_luns;
	const unsigned int in_lun_count = *lun_count;

	*lun_count = 0;

	report_luns = kzalloc(report_len, GFP_ATOMIC);
	if (!report_luns)
	{
		return -ENOMEM;
	}

	cmd[0] = REPORT_LUNS;

	// cmd length
	*(unsigned int*)&cmd[6] = cpu_to_be32(report_len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, (unsigned char*)report_luns, report_len, &sshdr, 30*HZ, 3, NULL);
	if (result != 0)
	{
		kfree(report_luns);
		return -EBUSY;
	}

	// get the length from the first four bytes
	report_len = be32_to_cpu(*(unsigned int*)&report_luns[0]);

	num_luns = (report_len / sizeof(unsigned long long));
	if (num_luns > in_lun_count)
	{
		kfree(report_luns);
		return -EINVAL;
	}

	*lun_count = num_luns;

	DPRINT_DBG(STORVSC_DRV, "report luns on scsi device (%p) found %u luns ", sdev, num_luns);

	// lun id starts at 1
	for (i=1; i< num_luns+1; i++)
	{
		lun = 0;
		data = (unsigned char*)&report_luns[i];
		for (j = 0; j < sizeof(lun); j += 2)
		{
			lun = lun | (((data[j] << 8) | data[j + 1]) << (j * 8));
		}

		luns[i-1] = lun;
	}

	kfree(report_luns);
	return 0;
}
#endif // KERNEL_2_6_9

static void storvsc_host_rescan(DEVICE_OBJECT* device_obj)
{
	struct device_context* device_ctx = to_device_context(device_obj);
	struct Scsi_Host *host = dev_get_drvdata(&device_ctx->device);
	struct host_device_context *host_device_ctx;

	DPRINT_ENTER(STORVSC_DRV);
#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
	DPRINT_ERR(STORVSC_DRV, "rescan not supported on 2.6.9 kernels!! You will need to reboot if you have added or removed the scsi lun device");
#else

	host_device_ctx = (struct host_device_context*)host->hostdata;

	DPRINT_INFO(STORVSC_DRV, "initiating rescan on dev obj (%p) target (%u) bus (%u)...", device_obj, host_device_ctx->target, host_device_ctx->path);

	// We need to queue this since the scanning may block and the caller may be in an intr context
	//scsi_queue_work(host, &host_device_ctx->host_rescan_work);
	schedule_work(&host_device_ctx->host_rescan_work);
#endif // KERNEL_2_6_9
	DPRINT_EXIT(STORVSC_DRV);
}

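//
// storvsc_get_chs() synthesizes a BIOS-style drive geometry for partitioning
// tools: capacity is first capped at 65535 cylinders * 16 heads * 255 sectors,
// then a sectors-per-track/heads pair is chosen (17 with a computed head count
// for small disks, 31/16, 63/16, or 255/16 for large disks) and the cylinder
// count is derived with sector_div().
//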
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info)
{
	sector_t total_sectors = capacity;
	sector_t cylinder_times_heads=0;
	sector_t temp=0;

	int sectors_per_track=0;
	int heads=0;
	int cylinders=0;
	int rem=0;

	if (total_sectors > (65535 * 16 * 255)) {
		total_sectors = (65535 * 16 * 255);
	}

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
	}
	else
	{
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads

		temp = cylinder_times_heads + 1023;
		rem = sector_div(temp, 1024); // sector_div stores the quotient in temp

		heads = temp;

		if (heads < 4) {
			heads = 4;
		}

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
		}
	}

	temp = cylinder_times_heads;
	rem = sector_div(temp, heads); // sector_div stores the quotient in temp
	cylinders = temp;

	info[0] = heads;
	info[1] = sectors_per_track;
	info[2] = cylinders;

	DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);

	return 0;
}

MODULE_LICENSE("GPL");

static int __init storvsc_init(void)
{
	int ret;

	DPRINT_ENTER(STORVSC_DRV);

	DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");

	ret = storvsc_drv_init(StorVscInitialize);

	DPRINT_EXIT(STORVSC_DRV);

	return ret;
}

static void __exit storvsc_exit(void)
{
	DPRINT_ENTER(STORVSC_DRV);

	storvsc_drv_exit();

	DPRINT_EXIT(STORVSC_DRV);
}

module_param(storvsc_ringbuffer_size, int, S_IRUGO);

module_init(storvsc_init);
module_exit(storvsc_exit);

// eof