   1/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
  37#include <linux/module.h>
38
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42////////////////////////////////////////////////////////////////
43
44#include <linux/ioctl.h> /* For SCSI-Passthrough */
45#include <asm/uaccess.h>
46
47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */
  49#include <linux/pci.h> /* for PCI support */
50#include <linux/proc_fs.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h> /* for udelay */
53#include <linux/interrupt.h>
54#include <linux/kernel.h> /* for printk */
55#include <linux/sched.h>
56#include <linux/reboot.h>
57#include <linux/spinlock.h>
  58#include <linux/dma-mapping.h>
  59
60#include <linux/timer.h>
61#include <linux/string.h>
62#include <linux/ioport.h>
  63#include <linux/mutex.h>
  64
65#include <asm/processor.h> /* for boot_cpu_data */
66#include <asm/pgtable.h>
67#include <asm/io.h> /* for virt_to_bus, etc. */
68
69#include <scsi/scsi.h>
70#include <scsi/scsi_cmnd.h>
71#include <scsi/scsi_device.h>
72#include <scsi/scsi_host.h>
73#include <scsi/scsi_tcq.h>
74
75#include "dpt/dptsig.h"
76#include "dpti.h"
77
78/*============================================================================
79 * Create a binary signature - this is read by dptsig
80 * Needed for our management apps
81 *============================================================================
82 */
83static dpt_sig_S DPTI_sig = {
84 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
85#ifdef __i386__
86 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
87#elif defined(__ia64__)
88 PROC_INTEL, PROC_IA64,
89#elif defined(__sparc__)
90 PROC_ULTRASPARC, PROC_ULTRASPARC,
91#elif defined(__alpha__)
92 PROC_ALPHA, PROC_ALPHA,
93#else
94 (-1),(-1),
95#endif
96 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
97 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
98 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
99};
100
101
102
103
104/*============================================================================
105 * Globals
106 *============================================================================
107 */
108
 109static DEFINE_MUTEX(adpt_configuration_lock);
 110
111static struct i2o_sys_tbl *sys_tbl = NULL;
112static int sys_tbl_ind = 0;
113static int sys_tbl_len = 0;
114
 115static adpt_hba* hba_chain = NULL;
116static int hba_count = 0;
117
 118static const struct file_operations adpt_fops = {
 119 .ioctl = adpt_ioctl,
120 .open = adpt_open,
121 .release = adpt_close
122};
123
124#ifdef REBOOT_NOTIFIER
125static struct notifier_block adpt_reboot_notifier =
126{
127 adpt_reboot_event,
128 NULL,
129 0
130};
131#endif
132
133/* Structures and definitions for synchronous message posting.
134 * See adpt_i2o_post_wait() for description
135 * */
136struct adpt_i2o_post_wait_data
137{
138 int status;
139 u32 id;
140 adpt_wait_queue_head_t *wq;
141 struct adpt_i2o_post_wait_data *next;
142};
143
144static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
145static u32 adpt_post_wait_id = 0;
146static DEFINE_SPINLOCK(adpt_post_wait_lock);
147
148
149/*============================================================================
150 * Functions
151 *============================================================================
152 */
153
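/* Read the firmware debug "blink LED" register pair: if the flag byte reads
 * 0xbc the firmware has latched a code in the value register (presumably a
 * fault/diagnostic code); otherwise report 0.
 */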
154static u8 adpt_read_blink_led(adpt_hba* host)
155{
156 if(host->FwDebugBLEDflag_P != 0) {
157 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158 return readb(host->FwDebugBLEDvalue_P);
159 }
160 }
161 return 0;
162}
163
164/*============================================================================
165 * Scsi host template interface functions
166 *============================================================================
167 */
168
169static struct pci_device_id dptids[] = {
170 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
171 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
172 { 0, }
173};
174MODULE_DEVICE_TABLE(pci,dptids);
175
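/* Overview of the bring-up sequence implemented in adpt_detect():
 *   1. scan the PCI bus for Adaptec/DPT I2O controllers and install each one,
 *   2. activate every IOP (get status, init the outbound queue, read the HRT),
 *   3. build and post the system table, then bring the IOPs online,
 *   4. read and parse each IOP's LCT to discover the attached devices,
 *   5. register a Scsi_Host per IOP plus the character-device control node.
 */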
176static int adpt_detect(struct scsi_host_template* sht)
177{
178 struct pci_dev *pDev = NULL;
179 adpt_hba* pHba;
180
181 adpt_init();
182
183 PINFO("Detecting Adaptec I2O RAID controllers...\n");
184
 185 /* search for all Adaptec I2O RAID cards */
 186 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 187 if(pDev->device == PCI_DPT_DEVICE_ID ||
188 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
189 if(adpt_install_hba(sht, pDev) ){
190 PERROR("Could not Init an I2O RAID device\n");
191 PERROR("Will not try to detect others.\n");
192 return hba_count-1;
193 }
 194 pci_dev_get(pDev);
 195 }
196 }
197
198 /* In INIT state, Activate IOPs */
199 for (pHba = hba_chain; pHba; pHba = pHba->next) {
200 // Activate does get status , init outbound, and get hrt
201 if (adpt_i2o_activate_hba(pHba) < 0) {
202 adpt_i2o_delete_hba(pHba);
203 }
204 }
205
206
207 /* Active IOPs in HOLD state */
208
209rebuild_sys_tab:
210 if (hba_chain == NULL)
211 return 0;
212
213 /*
214 * If build_sys_table fails, we kill everything and bail
215 * as we can't init the IOPs w/o a system table
216 */
217 if (adpt_i2o_build_sys_table() < 0) {
218 adpt_i2o_sys_shutdown();
219 return 0;
220 }
221
222 PDEBUG("HBA's in HOLD state\n");
223
 224 /* If an IOP doesn't come online, we need to rebuild the System table */
225 for (pHba = hba_chain; pHba; pHba = pHba->next) {
226 if (adpt_i2o_online_hba(pHba) < 0) {
227 adpt_i2o_delete_hba(pHba);
228 goto rebuild_sys_tab;
229 }
230 }
231
232 /* Active IOPs now in OPERATIONAL state */
233 PDEBUG("HBA's in OPERATIONAL state\n");
234
235 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
236 for (pHba = hba_chain; pHba; pHba = pHba->next) {
237 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
238 if (adpt_i2o_lct_get(pHba) < 0){
239 adpt_i2o_delete_hba(pHba);
240 continue;
241 }
242
243 if (adpt_i2o_parse_lct(pHba) < 0){
244 adpt_i2o_delete_hba(pHba);
245 continue;
246 }
247 adpt_inquiry(pHba);
248 }
249
250 for (pHba = hba_chain; pHba; pHba = pHba->next) {
251 if( adpt_scsi_register(pHba,sht) < 0){
252 adpt_i2o_delete_hba(pHba);
253 continue;
254 }
255 pHba->initialized = TRUE;
256 pHba->state &= ~DPTI_STATE_RESET;
257 }
258
259 // Register our control device node
260 // nodes will need to be created in /dev to access this
261 // the nodes can not be created from within the driver
262 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
263 adpt_i2o_sys_shutdown();
264 return 0;
265 }
266 return hba_count;
267}
268
269
270/*
271 * scsi_unregister will be called AFTER we return.
272 */
273static int adpt_release(struct Scsi_Host *host)
274{
275 adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
276// adpt_i2o_quiesce_hba(pHba);
277 adpt_i2o_delete_hba(pHba);
278 scsi_unregister(host);
279 return 0;
280}
281
282
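/* Issue a standard 36-byte INQUIRY to the adapter itself, using the
 * DPT-private I2O_CMD_SCSI_EXEC message with a single SG element, and build
 * the pHba->detail string (vendor/model/firmware) from the response.
 */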
283static void adpt_inquiry(adpt_hba* pHba)
284{
285 u32 msg[14];
286 u32 *mptr;
287 u32 *lenptr;
288 int direction;
289 int scsidir;
290 u32 len;
291 u32 reqlen;
292 u8* buf;
293 u8 scb[16];
294 s32 rcode;
295
296 memset(msg, 0, sizeof(msg));
 297 buf = kmalloc(80,GFP_KERNEL|ADDR32);
 298 if(!buf){
299 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
300 return;
301 }
302 memset((void*)buf, 0, 36);
303
304 len = 36;
305 direction = 0x00000000;
306 scsidir =0x40000000; // DATA IN (iop<--dev)
307
308 reqlen = 14; // SINGLE SGE
309 /* Stick the headers on */
310 msg[0] = reqlen<<16 | SGL_OFFSET_12;
311 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
312 msg[2] = 0;
313 msg[3] = 0;
314 // Adaptec/DPT Private stuff
315 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
316 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
317 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
318 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
319 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
320 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
321 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
322
323 mptr=msg+7;
324
325 memset(scb, 0, sizeof(scb));
326 // Write SCSI command into the message - always 16 byte block
327 scb[0] = INQUIRY;
328 scb[1] = 0;
329 scb[2] = 0;
330 scb[3] = 0;
331 scb[4] = 36;
332 scb[5] = 0;
333 // Don't care about the rest of scb
334
335 memcpy(mptr, scb, sizeof(scb));
336 mptr+=4;
337 lenptr=mptr++; /* Remember me - fill in when we know */
338
339 /* Now fill in the SGList and command */
340 *lenptr = len;
341 *mptr++ = 0xD0000000|direction|len;
342 *mptr++ = virt_to_bus(buf);
343
 344 // Send it on its way
345 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
346 if (rcode != 0) {
347 sprintf(pHba->detail, "Adaptec I2O RAID");
348 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
349 if (rcode != -ETIME && rcode != -EINTR)
350 kfree(buf);
351 } else {
352 memset(pHba->detail, 0, sizeof(pHba->detail));
353 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
354 memcpy(&(pHba->detail[16]), " Model: ", 8);
355 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
356 memcpy(&(pHba->detail[40]), " FW: ", 4);
357 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
358 pHba->detail[48] = '\0'; /* precautionary */
359 kfree(buf);
360 }
361 adpt_i2o_status_get(pHba);
362 return ;
363}
364
365
366static int adpt_slave_configure(struct scsi_device * device)
367{
368 struct Scsi_Host *host = device->host;
369 adpt_hba* pHba;
370
371 pHba = (adpt_hba *) host->hostdata[0];
372
373 if (host->can_queue && device->tagged_supported) {
374 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
375 host->can_queue - 1);
376 } else {
377 scsi_adjust_queue_depth(device, 0, 1);
378 }
379 return 0;
380}
381
382static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
383{
384 adpt_hba* pHba = NULL;
385 struct adpt_device* pDev = NULL; /* dpt per device information */
 386
387 cmd->scsi_done = done;
388 /*
389 * SCSI REQUEST_SENSE commands will be executed automatically by the
390 * Host Adapter for any errors, so they should not be executed
391 * explicitly unless the Sense Data is zero indicating that no error
392 * occurred.
393 */
394
395 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
396 cmd->result = (DID_OK << 16);
397 cmd->scsi_done(cmd);
398 return 0;
399 }
400
401 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
402 if (!pHba) {
403 return FAILED;
404 }
405
406 rmb();
407 /*
408 * TODO: I need to block here if I am processing ioctl cmds
409 * but if the outstanding cmds all finish before the ioctl,
410 * the scsi-core will not know to start sending cmds to me again.
 411 * I need a way to restart the scsi-core's queues, or should I block
412 * calling scsi_done on the outstanding cmds instead
413 * for now we don't set the IOCTL state
414 */
415 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
416 pHba->host->last_reset = jiffies;
417 pHba->host->resetting = 1;
418 return 1;
419 }
420
 421 // TODO if the cmd->device is offline then I may need to issue a bus rescan
422 // followed by a get_lct to see if the device is there anymore
423 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
424 /*
425 * First command request for this device. Set up a pointer
426 * to the device structure. This should be a TEST_UNIT_READY
427 * command from scan_scsis_single.
428 */
429 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
430 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
431 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
432 cmd->result = (DID_NO_CONNECT << 16);
433 cmd->scsi_done(cmd);
434 return 0;
435 }
436 cmd->device->hostdata = pDev;
437 }
438 pDev->pScsi_dev = cmd->device;
439
440 /*
441 * If we are being called from when the device is being reset,
442 * delay processing of the command until later.
443 */
444 if (pDev->state & DPTI_DEV_RESET ) {
445 return FAILED;
446 }
447 return adpt_scsi_to_i2o(pHba, cmd, pDev);
448}
449
450static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
451 sector_t capacity, int geom[])
452{
453 int heads=-1;
454 int sectors=-1;
455 int cylinders=-1;
456
457 // *** First lets set the default geometry ****
458
 459 // If the capacity is less than 0x2000
460 if (capacity < 0x2000 ) { // floppy
461 heads = 18;
462 sectors = 2;
463 }
464 // else if between 0x2000 and 0x20000
465 else if (capacity < 0x20000) {
466 heads = 64;
467 sectors = 32;
468 }
469 // else if between 0x20000 and 0x40000
470 else if (capacity < 0x40000) {
471 heads = 65;
472 sectors = 63;
473 }
 474 // else if between 0x40000 and 0x80000
475 else if (capacity < 0x80000) {
476 heads = 128;
477 sectors = 63;
478 }
479 // else if greater than 0x80000
480 else {
481 heads = 255;
482 sectors = 63;
483 }
484 cylinders = sector_div(capacity, heads * sectors);
485
486 // Special case if CDROM
487 if(sdev->type == 5) { // CDROM
488 heads = 252;
489 sectors = 63;
490 cylinders = 1111;
491 }
492
493 geom[0] = heads;
494 geom[1] = sectors;
495 geom[2] = cylinders;
496
497 PDEBUG("adpt_bios_param: exit\n");
498 return 0;
499}
500
501
502static const char *adpt_info(struct Scsi_Host *host)
503{
504 adpt_hba* pHba;
505
506 pHba = (adpt_hba *) host->hostdata[0];
507 return (char *) (pHba->detail);
508}
509
510static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
511 int length, int inout)
512{
513 struct adpt_device* d;
514 int id;
515 int chan;
516 int len = 0;
517 int begin = 0;
518 int pos = 0;
519 adpt_hba* pHba;
520 int unit;
521
522 *start = buffer;
523 if (inout == TRUE) {
524 /*
525 * The user has done a write and wants us to take the
526 * data in the buffer and do something with it.
527 * proc_scsiwrite calls us with inout = 1
528 *
529 * Read data from buffer (writing to us) - NOT SUPPORTED
530 */
531 return -EINVAL;
532 }
533
534 /*
535 * inout = 0 means the user has done a read and wants information
536 * returned, so we write information about the cards into the buffer
537 * proc_scsiread() calls us with inout = 0
538 */
539
540 // Find HBA (host bus adapter) we are looking for
 541 mutex_lock(&adpt_configuration_lock);
 542 for (pHba = hba_chain; pHba; pHba = pHba->next) {
543 if (pHba->host == host) {
544 break; /* found adapter */
545 }
546 }
 547 mutex_unlock(&adpt_configuration_lock);
 548 if (pHba == NULL) {
549 return 0;
550 }
551 host = pHba->host;
552
553 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
554 len += sprintf(buffer+len, "%s\n", pHba->detail);
555 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
556 pHba->host->host_no, pHba->name, host->irq);
557 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
558 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
559
560 pos = begin + len;
561
562 /* CHECKPOINT */
563 if(pos > offset + length) {
564 goto stop_output;
565 }
566 if(pos <= offset) {
567 /*
568 * If we haven't even written to where we last left
569 * off (the last time we were called), reset the
570 * beginning pointer.
571 */
572 len = 0;
573 begin = pos;
574 }
575 len += sprintf(buffer+len, "Devices:\n");
576 for(chan = 0; chan < MAX_CHANNEL; chan++) {
577 for(id = 0; id < MAX_ID; id++) {
578 d = pHba->channel[chan].device[id];
579 while(d){
580 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
581 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
582 pos = begin + len;
583
584
585 /* CHECKPOINT */
586 if(pos > offset + length) {
587 goto stop_output;
588 }
589 if(pos <= offset) {
590 len = 0;
591 begin = pos;
592 }
593
594 unit = d->pI2o_dev->lct_data.tid;
595 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
596 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
597 scsi_device_online(d->pScsi_dev)? "online":"offline");
598 pos = begin + len;
599
600 /* CHECKPOINT */
601 if(pos > offset + length) {
602 goto stop_output;
603 }
604 if(pos <= offset) {
605 len = 0;
606 begin = pos;
607 }
608
609 d = d->next_lun;
610 }
611 }
612 }
613
614 /*
615 * begin is where we last checked our position with regards to offset
616 * begin is always less than offset. len is relative to begin. It
617 * is the number of bytes written past begin
618 *
619 */
620stop_output:
621 /* stop the output and calculate the correct length */
622 *(buffer + len) = '\0';
623
624 *start = buffer + (offset - begin); /* Start of wanted data */
625 len -= (offset - begin);
626 if(len > length) {
627 len = length;
628 } else if(len < 0){
629 len = 0;
630 **start = '\0';
631 }
632 return len;
633}
634
635
636/*===========================================================================
637 * Error Handling routines
638 *===========================================================================
639 */
640
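/* eh_abort_handler: post an I2O_CMD_SCSI_ABORT addressed to the device's TID,
 * carrying the scsi_cmnd pointer as the transaction context, and wait
 * synchronously for the IOP to acknowledge the abort.
 */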
641static int adpt_abort(struct scsi_cmnd * cmd)
642{
643 adpt_hba* pHba = NULL; /* host bus adapter structure */
644 struct adpt_device* dptdevice; /* dpt per device information */
645 u32 msg[5];
646 int rcode;
647
648 if(cmd->serial_number == 0){
649 return FAILED;
650 }
651 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
652 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
653 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
654 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
655 return FAILED;
656 }
657
658 memset(msg, 0, sizeof(msg));
659 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
660 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
661 msg[2] = 0;
662 msg[3]= 0;
663 msg[4] = (u32)cmd;
 664 if (pHba->host)
665 spin_lock_irq(pHba->host->host_lock);
666 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
667 if (pHba->host)
668 spin_unlock_irq(pHba->host->host_lock);
669 if (rcode != 0) {
 670 if(rcode == -EOPNOTSUPP ){
671 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
672 return FAILED;
673 }
674 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
675 return FAILED;
676 }
677 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
678 return SUCCESS;
679}
680
681
682#define I2O_DEVICE_RESET 0x27
683// This is the same for BLK and SCSI devices
684// NOTE this is wrong in the i2o.h definitions
685// This is not currently supported by our adapter but we issue it anyway
686static int adpt_device_reset(struct scsi_cmnd* cmd)
687{
688 adpt_hba* pHba;
689 u32 msg[4];
690 u32 rcode;
691 int old_state;
 692 struct adpt_device* d = cmd->device->hostdata;
 693
694 pHba = (void*) cmd->device->host->hostdata[0];
695 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
696 if (!d) {
697 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
698 return FAILED;
699 }
700 memset(msg, 0, sizeof(msg));
701 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
702 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
703 msg[2] = 0;
704 msg[3] = 0;
705
 706 if (pHba->host)
707 spin_lock_irq(pHba->host->host_lock);
 708 old_state = d->state;
709 d->state |= DPTI_DEV_RESET;
 710 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
711 d->state = old_state;
712 if (pHba->host)
713 spin_unlock_irq(pHba->host->host_lock);
714 if (rcode != 0) {
 715 if(rcode == -EOPNOTSUPP ){
716 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
717 return FAILED;
718 }
719 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
720 return FAILED;
721 } else {
 722 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
723 return SUCCESS;
724 }
725}
726
727
728#define I2O_HBA_BUS_RESET 0x87
729// This version of bus reset is called by the eh_error handler
730static int adpt_bus_reset(struct scsi_cmnd* cmd)
731{
732 adpt_hba* pHba;
733 u32 msg[4];
 734 u32 rcode;
 735
736 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
737 memset(msg, 0, sizeof(msg));
738 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
739 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
740 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
741 msg[2] = 0;
742 msg[3] = 0;
 743 if (pHba->host)
744 spin_lock_irq(pHba->host->host_lock);
745 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
746 if (pHba->host)
747 spin_unlock_irq(pHba->host->host_lock);
748 if (rcode != 0) {
 749 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
750 return FAILED;
751 } else {
752 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
753 return SUCCESS;
754 }
755}
756
757// This version of reset is called by the eh_error_handler
 758static int __adpt_reset(struct scsi_cmnd* cmd)
 759{
760 adpt_hba* pHba;
761 int rcode;
762 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
763 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
764 rcode = adpt_hba_reset(pHba);
765 if(rcode == 0){
766 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
767 return SUCCESS;
768 } else {
769 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
770 return FAILED;
771 }
772}
773
 774static int adpt_reset(struct scsi_cmnd* cmd)
775{
776 int rc;
777
778 spin_lock_irq(cmd->device->host->host_lock);
779 rc = __adpt_reset(cmd);
780 spin_unlock_irq(cmd->device->host->host_lock);
781
782 return rc;
783}
784
 785// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
786static int adpt_hba_reset(adpt_hba* pHba)
787{
788 int rcode;
789
790 pHba->state |= DPTI_STATE_RESET;
791
792 // Activate does get status , init outbound, and get hrt
793 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
794 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
795 adpt_i2o_delete_hba(pHba);
796 return rcode;
797 }
798
799 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
800 adpt_i2o_delete_hba(pHba);
801 return rcode;
802 }
803 PDEBUG("%s: in HOLD state\n",pHba->name);
804
805 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
806 adpt_i2o_delete_hba(pHba);
807 return rcode;
808 }
809 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
810
811 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
812 adpt_i2o_delete_hba(pHba);
813 return rcode;
814 }
815
816 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
817 adpt_i2o_delete_hba(pHba);
818 return rcode;
819 }
820 pHba->state &= ~DPTI_STATE_RESET;
821
822 adpt_fail_posted_scbs(pHba);
823 return 0; /* return success */
824}
825
826/*===========================================================================
827 *
828 *===========================================================================
829 */
830
831
832static void adpt_i2o_sys_shutdown(void)
833{
834 adpt_hba *pHba, *pNext;
 835 struct adpt_i2o_post_wait_data *p1, *old;
 836
837 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
838 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
839 /* Delete all IOPs from the controller chain */
840 /* They should have already been released by the
841 * scsi-core
842 */
843 for (pHba = hba_chain; pHba; pHba = pNext) {
844 pNext = pHba->next;
845 adpt_i2o_delete_hba(pHba);
846 }
847
848 /* Remove any timedout entries from the wait queue. */
 849// spin_lock_irqsave(&adpt_post_wait_lock, flags);
850 /* Nothing should be outstanding at this point so just
851 * free them
852 */
 853 for(p1 = adpt_post_wait_queue; p1;) {
854 old = p1;
855 p1 = p1->next;
856 kfree(old);
 857 }
858// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
859 adpt_post_wait_queue = NULL;
860
861 printk(KERN_INFO "Adaptec I2O controllers down.\n");
862}
863
864/*
865 * reboot/shutdown notification.
866 *
867 * - Quiesce each IOP in the system
868 *
869 */
870
871#ifdef REBOOT_NOTIFIER
872static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
873{
874
875 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
876 return NOTIFY_DONE;
877
878 adpt_i2o_sys_shutdown();
879
880 return NOTIFY_DONE;
881}
882#endif
883
884
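/* One-time setup for a single controller: enable the PCI device, claim its
 * regions, select a 64-bit (falling back to 32-bit) DMA mask, ioremap BAR0
 * (and BAR1 for split-BAR Raptor boards), link a new adpt_hba onto hba_chain
 * and hook up the shared interrupt handler.
 */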
885static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
886{
887
888 adpt_hba* pHba = NULL;
889 adpt_hba* p = NULL;
890 ulong base_addr0_phys = 0;
891 ulong base_addr1_phys = 0;
892 u32 hba_map0_area_size = 0;
893 u32 hba_map1_area_size = 0;
894 void __iomem *base_addr_virt = NULL;
895 void __iomem *msg_addr_virt = NULL;
896
897 int raptorFlag = FALSE;
 898
899 if(pci_enable_device(pDev)) {
900 return -EINVAL;
901 }
 902
903 if (pci_request_regions(pDev, "dpt_i2o")) {
904 PERROR("dpti: adpt_config_hba: pci request region failed\n");
905 return -EINVAL;
906 }
907
 908 pci_set_master(pDev);
 909 if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
910 pci_set_dma_mask(pDev, DMA_32BIT_MASK))
 911 return -EINVAL;
912
913 base_addr0_phys = pci_resource_start(pDev,0);
914 hba_map0_area_size = pci_resource_len(pDev,0);
915
916 // Check if standard PCI card or single BAR Raptor
917 if(pDev->device == PCI_DPT_DEVICE_ID){
918 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
919 // Raptor card with this device id needs 4M
920 hba_map0_area_size = 0x400000;
921 } else { // Not Raptor - it is a PCI card
922 if(hba_map0_area_size > 0x100000 ){
923 hba_map0_area_size = 0x100000;
924 }
925 }
926 } else {// Raptor split BAR config
927 // Use BAR1 in this configuration
928 base_addr1_phys = pci_resource_start(pDev,1);
929 hba_map1_area_size = pci_resource_len(pDev,1);
930 raptorFlag = TRUE;
931 }
932
 933 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
934 if (!base_addr_virt) {
 935 pci_release_regions(pDev);
 936 PERROR("dpti: adpt_config_hba: io remap failed\n");
937 return -EINVAL;
938 }
939
940 if(raptorFlag == TRUE) {
941 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
942 if (!msg_addr_virt) {
943 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
944 iounmap(base_addr_virt);
 945 pci_release_regions(pDev);
 946 return -EINVAL;
947 }
948 } else {
949 msg_addr_virt = base_addr_virt;
950 }
951
952 // Allocate and zero the data structure
953 pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
954 if( pHba == NULL) {
955 if(msg_addr_virt != base_addr_virt){
956 iounmap(msg_addr_virt);
957 }
958 iounmap(base_addr_virt);
 959 pci_release_regions(pDev);
 960 return -ENOMEM;
961 }
962 memset(pHba, 0, sizeof(adpt_hba));
963
 964 mutex_lock(&adpt_configuration_lock);
 965
966 if(hba_chain != NULL){
967 for(p = hba_chain; p->next; p = p->next);
968 p->next = pHba;
969 } else {
970 hba_chain = pHba;
971 }
972 pHba->next = NULL;
973 pHba->unit = hba_count;
 974 sprintf(pHba->name, "dpti%d", hba_count);
 975 hba_count++;
976
 977 mutex_unlock(&adpt_configuration_lock);
 978
979 pHba->pDev = pDev;
980 pHba->base_addr_phys = base_addr0_phys;
981
982 // Set up the Virtual Base Address of the I2O Device
983 pHba->base_addr_virt = base_addr_virt;
984 pHba->msg_addr_virt = msg_addr_virt;
985 pHba->irq_mask = base_addr_virt+0x30;
986 pHba->post_port = base_addr_virt+0x40;
987 pHba->reply_port = base_addr_virt+0x44;
988
989 pHba->hrt = NULL;
990 pHba->lct = NULL;
991 pHba->lct_size = 0;
992 pHba->status_block = NULL;
993 pHba->post_count = 0;
994 pHba->state = DPTI_STATE_RESET;
995 pHba->pDev = pDev;
996 pHba->devices = NULL;
997
998 // Initializing the spinlocks
999 spin_lock_init(&pHba->state_lock);
1000 spin_lock_init(&adpt_post_wait_lock);
1001
1002 if(raptorFlag == 0){
1003 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1004 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1005 } else {
1006 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1007 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1008 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1009 }
1010
 1011 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
 1012 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1013 adpt_i2o_delete_hba(pHba);
1014 return -EINVAL;
1015 }
1016
1017 return 0;
1018}
1019
1020
1021static void adpt_i2o_delete_hba(adpt_hba* pHba)
1022{
1023 adpt_hba* p1;
1024 adpt_hba* p2;
1025 struct i2o_device* d;
1026 struct i2o_device* next;
1027 int i;
1028 int j;
1029 struct adpt_device* pDev;
1030 struct adpt_device* pNext;
1031
1032
 1033 mutex_lock(&adpt_configuration_lock);
 1034 // scsi_unregister calls our adpt_release which
 1035 // does a quiesce
1036 if(pHba->host){
1037 free_irq(pHba->host->irq, pHba);
1038 }
 1039 p2 = NULL;
1040 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1041 if(p1 == pHba) {
1042 if(p2) {
1043 p2->next = p1->next;
1044 } else {
1045 hba_chain = p1->next;
1046 }
1047 break;
1048 }
1049 }
1050
1051 hba_count--;
 1052 mutex_unlock(&adpt_configuration_lock);
 1053
1054 iounmap(pHba->base_addr_virt);
 1055 pci_release_regions(pHba->pDev);
 1056 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1057 iounmap(pHba->msg_addr_virt);
1058 }
 1059 kfree(pHba->hrt);
1060 kfree(pHba->lct);
1061 kfree(pHba->status_block);
1062 kfree(pHba->reply_pool);
 1063
1064 for(d = pHba->devices; d ; d = next){
1065 next = d->next;
1066 kfree(d);
1067 }
1068 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1069 for(j = 0; j < MAX_ID; j++){
1070 if(pHba->channel[i].device[j] != NULL){
1071 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1072 pNext = pDev->next_lun;
1073 kfree(pDev);
1074 }
1075 }
1076 }
1077 }
 1078 pci_dev_put(pHba->pDev);
 1079 kfree(pHba);
1080
1081 if(hba_count <= 0){
1082 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1083 }
1084}
1085
1086
1087static int adpt_init(void)
1088{
 1089 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
 1090#ifdef REBOOT_NOTIFIER
1091 register_reboot_notifier(&adpt_reboot_notifier);
1092#endif
1093
1094 return 0;
1095}
1096
1097
1098static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1099{
1100 struct adpt_device* d;
1101
1102 if(chan < 0 || chan >= MAX_CHANNEL)
1103 return NULL;
1104
1105 if( pHba->channel[chan].device == NULL){
1106 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1107 return NULL;
1108 }
1109
1110 d = pHba->channel[chan].device[id];
1111 if(!d || d->tid == 0) {
1112 return NULL;
1113 }
1114
1115 /* If it is the only lun at that address then this should match*/
1116 if(d->scsi_lun == lun){
1117 return d;
1118 }
1119
1120 /* else we need to look through all the luns */
1121 for(d=d->next_lun ; d ; d = d->next_lun){
1122 if(d->scsi_lun == lun){
1123 return d;
1124 }
1125 }
1126 return NULL;
1127}
1128
1129
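/* Synchronous message post. Each caller gets a 15-bit id that is stored both
 * in the message context (msg[2]) and in an adpt_i2o_post_wait_data entry on
 * the global queue; the caller then sleeps until adpt_i2o_post_wait_complete()
 * matches the id from the reply, records the status and wakes it, or until
 * the timeout expires.
 */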
1130static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1131{
1132 // I used my own version of the WAIT_QUEUE_HEAD
1133 // to handle some version differences
1134 // When embedded in the kernel this could go back to the vanilla one
1135 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1136 int status = 0;
1137 ulong flags = 0;
1138 struct adpt_i2o_post_wait_data *p1, *p2;
1139 struct adpt_i2o_post_wait_data *wait_data =
1140 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
 1141 DECLARE_WAITQUEUE(wait, current);
 1142
 1143 if (!wait_data)
 1144 return -ENOMEM;
 1145
 1146 /*
1147 * The spin locking is needed to keep anyone from playing
1148 * with the queue pointers and id while we do the same
1149 */
1150 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1151 // TODO we need a MORE unique way of getting ids
1152 // to support async LCT get
1153 wait_data->next = adpt_post_wait_queue;
1154 adpt_post_wait_queue = wait_data;
1155 adpt_post_wait_id++;
1156 adpt_post_wait_id &= 0x7fff;
1157 wait_data->id = adpt_post_wait_id;
1158 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1159
1160 wait_data->wq = &adpt_wq_i2o_post;
1161 wait_data->status = -ETIMEDOUT;
1162
 1163 add_wait_queue(&adpt_wq_i2o_post, &wait);
 1164
1165 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1166 timeout *= HZ;
1167 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1168 set_current_state(TASK_INTERRUPTIBLE);
1169 if(pHba->host)
1170 spin_unlock_irq(pHba->host->host_lock);
1171 if (!timeout)
1172 schedule();
1173 else{
1174 timeout = schedule_timeout(timeout);
1175 if (timeout == 0) {
1176 // I/O issued, but cannot get result in
 1177 // specified time. Freeing resources is
1178 // dangerous.
1179 status = -ETIME;
1180 }
1181 }
1182 if(pHba->host)
1183 spin_lock_irq(pHba->host->host_lock);
1184 }
 1185 remove_wait_queue(&adpt_wq_i2o_post, &wait);
 1186
1187 if(status == -ETIMEDOUT){
1188 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1189 // We will have to free the wait_data memory during shutdown
1190 return status;
1191 }
1192
1193 /* Remove the entry from the queue. */
1194 p2 = NULL;
1195 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1196 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1197 if(p1 == wait_data) {
1198 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1199 status = -EOPNOTSUPP;
1200 }
1201 if(p2) {
1202 p2->next = p1->next;
1203 } else {
1204 adpt_post_wait_queue = p1->next;
1205 }
1206 break;
1207 }
1208 }
1209 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1210
1211 kfree(wait_data);
1212
1213 return status;
1214}
1215
1216
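/* Low-level post: poll the inbound post FIFO (for up to 30 seconds) for a free
 * message frame offset, copy the message into the frame with memcpy_toio() and
 * write the offset back to the post port to hand it to the IOP.
 */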
1217static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1218{
1219
1220 u32 m = EMPTY_QUEUE;
1221 u32 __iomem *msg;
1222 ulong timeout = jiffies + 30*HZ;
1223 do {
1224 rmb();
1225 m = readl(pHba->post_port);
1226 if (m != EMPTY_QUEUE) {
1227 break;
1228 }
1229 if(time_after(jiffies,timeout)){
1230 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1231 return -ETIMEDOUT;
1232 }
 1233 schedule_timeout_uninterruptible(1);
 1234 } while(m == EMPTY_QUEUE);
1235
1236 msg = pHba->msg_addr_virt + m;
1237 memcpy_toio(msg, data, len);
1238 wmb();
1239
1240 //post message
1241 writel(m, pHba->post_port);
1242 wmb();
1243
1244 return 0;
1245}
1246
1247
1248static void adpt_i2o_post_wait_complete(u32 context, int status)
1249{
1250 struct adpt_i2o_post_wait_data *p1 = NULL;
1251 /*
1252 * We need to search through the adpt_post_wait
1253 * queue to see if the given message is still
1254 * outstanding. If not, it means that the IOP
1255 * took longer to respond to the message than we
1256 * had allowed and timer has already expired.
1257 * Not much we can do about that except log
1258 * it for debug purposes, increase timeout, and recompile
1259 *
1260 * Lock needed to keep anyone from moving queue pointers
1261 * around while we're looking through them.
1262 */
1263
1264 context &= 0x7fff;
1265
1266 spin_lock(&adpt_post_wait_lock);
1267 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1268 if(p1->id == context) {
1269 p1->status = status;
1270 spin_unlock(&adpt_post_wait_lock);
1271 wake_up_interruptible(p1->wq);
1272 return;
1273 }
1274 }
1275 spin_unlock(&adpt_post_wait_lock);
1276 // If this happens we lose commands that probably really completed
1277 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1278 printk(KERN_DEBUG" Tasks in wait queue:\n");
1279 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1280 printk(KERN_DEBUG" %d\n",p1->id);
1281 }
1282 return;
1283}
1284
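/* IOP reset: grab a message frame, send I2O_CMD_ADAPTER_RESET with a DMA-able
 * status byte, then poll that byte (and the post FIFO) until the firmware
 * reports that the reset finished or the timeout expires.
 */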
1285static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1286{
1287 u32 msg[8];
1288 u8* status;
1289 u32 m = EMPTY_QUEUE ;
1290 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1291
1292 if(pHba->initialized == FALSE) { // First time reset should be quick
1293 timeout = jiffies + (25*HZ);
1294 } else {
1295 adpt_i2o_quiesce_hba(pHba);
1296 }
1297
1298 do {
1299 rmb();
1300 m = readl(pHba->post_port);
1301 if (m != EMPTY_QUEUE) {
1302 break;
1303 }
1304 if(time_after(jiffies,timeout)){
1305 printk(KERN_WARNING"Timeout waiting for message!\n");
1306 return -ETIMEDOUT;
1307 }
 1308 schedule_timeout_uninterruptible(1);
 1309 } while (m == EMPTY_QUEUE);
1310
 1311 status = kzalloc(4, GFP_KERNEL|ADDR32);
 1312 if(status == NULL) {
1313 adpt_send_nop(pHba, m);
1314 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1315 return -ENOMEM;
1316 }
 1317
1318 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1319 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1320 msg[2]=0;
1321 msg[3]=0;
1322 msg[4]=0;
1323 msg[5]=0;
1324 msg[6]=virt_to_bus(status);
1325 msg[7]=0;
1326
1327 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1328 wmb();
1329 writel(m, pHba->post_port);
1330 wmb();
1331
1332 while(*status == 0){
1333 if(time_after(jiffies,timeout)){
1334 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1335 kfree(status);
1336 return -ETIMEDOUT;
1337 }
1338 rmb();
 1339 schedule_timeout_uninterruptible(1);
 1340 }
1341
1342 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1343 PDEBUG("%s: Reset in progress...\n", pHba->name);
1344 // Here we wait for message frame to become available
1345 // indicated that reset has finished
1346 do {
1347 rmb();
1348 m = readl(pHba->post_port);
1349 if (m != EMPTY_QUEUE) {
1350 break;
1351 }
1352 if(time_after(jiffies,timeout)){
1353 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1354 return -ETIMEDOUT;
1355 }
 1356 schedule_timeout_uninterruptible(1);
 1357 } while (m == EMPTY_QUEUE);
1358 // Flush the offset
1359 adpt_send_nop(pHba, m);
1360 }
1361 adpt_i2o_status_get(pHba);
1362 if(*status == 0x02 ||
1363 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1364 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1365 pHba->name);
1366 } else {
1367 PDEBUG("%s: Reset completed.\n", pHba->name);
1368 }
1369
1370 kfree(status);
1371#ifdef UARTDELAY
1372 // This delay is to allow someone attached to the card through the debug UART to
1373 // set up the dump levels that they want before the rest of the initialization sequence
1374 adpt_delay(20000);
1375#endif
1376 return 0;
1377}
1378
1379
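/* Walk the Logical Configuration Table. The LCT header is 3 32-bit words and
 * each entry is 9 words, hence max = (table_size - 3) / 9 below. Entries
 * claimed by another TID are only used to track the highest channel/id/lun;
 * the rest become i2o_device/adpt_device records indexed by bus, id and lun.
 */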
1380static int adpt_i2o_parse_lct(adpt_hba* pHba)
1381{
1382 int i;
1383 int max;
1384 int tid;
1385 struct i2o_device *d;
1386 i2o_lct *lct = pHba->lct;
1387 u8 bus_no = 0;
1388 s16 scsi_id;
1389 s16 scsi_lun;
1390 u32 buf[10]; // larger than 7, or 8 ...
1391 struct adpt_device* pDev;
1392
1393 if (lct == NULL) {
1394 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1395 return -1;
1396 }
1397
1398 max = lct->table_size;
1399 max -= 3;
1400 max /= 9;
1401
1402 for(i=0;i<max;i++) {
1403 if( lct->lct_entry[i].user_tid != 0xfff){
1404 /*
1405 * If we have hidden devices, we need to inform the upper layers about
1406 * the possible maximum id reference to handle device access when
1407 * an array is disassembled. This code has no other purpose but to
1408 * allow us future access to devices that are currently hidden
1409 * behind arrays, hotspares or have not been configured (JBOD mode).
1410 */
1411 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1412 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1413 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1414 continue;
1415 }
1416 tid = lct->lct_entry[i].tid;
1417 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1418 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1419 continue;
1420 }
1421 bus_no = buf[0]>>16;
1422 scsi_id = buf[1];
1423 scsi_lun = (buf[2]>>8 )&0xff;
1424 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1425 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1426 continue;
1427 }
1428 if (scsi_id >= MAX_ID){
 1429 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
1430 continue;
1431 }
1432 if(bus_no > pHba->top_scsi_channel){
1433 pHba->top_scsi_channel = bus_no;
1434 }
1435 if(scsi_id > pHba->top_scsi_id){
1436 pHba->top_scsi_id = scsi_id;
1437 }
1438 if(scsi_lun > pHba->top_scsi_lun){
1439 pHba->top_scsi_lun = scsi_lun;
1440 }
1441 continue;
1442 }
 1443 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
 1444 if(d==NULL)
1445 {
1446 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1447 return -ENOMEM;
1448 }
1449
 1450 d->controller = pHba;
 1451 d->next = NULL;
1452
1453 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1454
1455 d->flags = 0;
1456 tid = d->lct_data.tid;
1457 adpt_i2o_report_hba_unit(pHba, d);
1458 adpt_i2o_install_device(pHba, d);
1459 }
1460 bus_no = 0;
1461 for(d = pHba->devices; d ; d = d->next) {
1462 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1463 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1464 tid = d->lct_data.tid;
1465 // TODO get the bus_no from hrt-but for now they are in order
1466 //bus_no =
1467 if(bus_no > pHba->top_scsi_channel){
1468 pHba->top_scsi_channel = bus_no;
1469 }
1470 pHba->channel[bus_no].type = d->lct_data.class_id;
1471 pHba->channel[bus_no].tid = tid;
1472 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1473 {
1474 pHba->channel[bus_no].scsi_id = buf[1];
1475 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1476 }
1477 // TODO remove - this is just until we get from hrt
1478 bus_no++;
1479 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1480 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1481 break;
1482 }
1483 }
1484 }
1485
1486 // Setup adpt_device table
1487 for(d = pHba->devices; d ; d = d->next) {
1488 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1489 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1490 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1491
1492 tid = d->lct_data.tid;
1493 scsi_id = -1;
1494 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1495 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1496 bus_no = buf[0]>>16;
1497 scsi_id = buf[1];
1498 scsi_lun = (buf[2]>>8 )&0xff;
1499 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1500 continue;
1501 }
1502 if (scsi_id >= MAX_ID) {
1503 continue;
1504 }
1505 if( pHba->channel[bus_no].device[scsi_id] == NULL){
 1506 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1507 if(pDev == NULL) {
1508 return -ENOMEM;
1509 }
1510 pHba->channel[bus_no].device[scsi_id] = pDev;
 1511 } else {
1512 for( pDev = pHba->channel[bus_no].device[scsi_id];
1513 pDev->next_lun; pDev = pDev->next_lun){
1514 }
 1515 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1516 if(pDev->next_lun == NULL) {
1517 return -ENOMEM;
1518 }
 1519 pDev = pDev->next_lun;
1520 }
1521 pDev->tid = tid;
1522 pDev->scsi_channel = bus_no;
1523 pDev->scsi_id = scsi_id;
1524 pDev->scsi_lun = scsi_lun;
1525 pDev->pI2o_dev = d;
1526 d->owner = pDev;
1527 pDev->type = (buf[0])&0xff;
1528 pDev->flags = (buf[0]>>8)&0xff;
1529 if(scsi_id > pHba->top_scsi_id){
1530 pHba->top_scsi_id = scsi_id;
1531 }
1532 if(scsi_lun > pHba->top_scsi_lun){
1533 pHba->top_scsi_lun = scsi_lun;
1534 }
1535 }
1536 if(scsi_id == -1){
1537 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1538 d->lct_data.identity_tag);
1539 }
1540 }
1541 }
1542 return 0;
1543}
1544
1545
1546/*
1547 * Each I2O controller has a chain of devices on it - these match
1548 * the useful parts of the LCT of the board.
1549 */
1550
1551static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1552{
 1553 mutex_lock(&adpt_configuration_lock);
 1554 d->controller=pHba;
1555 d->owner=NULL;
1556 d->next=pHba->devices;
1557 d->prev=NULL;
1558 if (pHba->devices != NULL){
1559 pHba->devices->prev=d;
1560 }
1561 pHba->devices=d;
1562 *d->dev_name = 0;
1563
 1564 mutex_unlock(&adpt_configuration_lock);
 1565 return 0;
1566}
1567
1568static int adpt_open(struct inode *inode, struct file *file)
1569{
1570 int minor;
1571 adpt_hba* pHba;
1572
1573 //TODO check for root access
1574 //
1575 minor = iminor(inode);
1576 if (minor >= hba_count) {
1577 return -ENXIO;
1578 }
 1579 mutex_lock(&adpt_configuration_lock);
 1580 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1581 if (pHba->unit == minor) {
1582 break; /* found adapter */
1583 }
1584 }
1585 if (pHba == NULL) {
 1586 mutex_unlock(&adpt_configuration_lock);
 1587 return -ENXIO;
1588 }
1589
1590// if(pHba->in_use){
 1591 // mutex_unlock(&adpt_configuration_lock);
 1592// return -EBUSY;
1593// }
1594
1595 pHba->in_use = 1;
 1596 mutex_unlock(&adpt_configuration_lock);
 1597
1598 return 0;
1599}
1600
1601static int adpt_close(struct inode *inode, struct file *file)
1602{
1603 int minor;
1604 adpt_hba* pHba;
1605
1606 minor = iminor(inode);
1607 if (minor >= hba_count) {
1608 return -ENXIO;
1609 }
 1610 mutex_lock(&adpt_configuration_lock);
 1611 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1612 if (pHba->unit == minor) {
1613 break; /* found adapter */
1614 }
1615 }
 1616 mutex_unlock(&adpt_configuration_lock);
 1617 if (pHba == NULL) {
1618 return -ENXIO;
1619 }
1620
1621 pHba->in_use = 0;
1622
1623 return 0;
1624}
1625
1626
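/* I2O passthrough ioctl: copy the caller's message frame in, replace any
 * simple SG elements with kernel bounce buffers (copying user data in for
 * outbound transfers), post the message synchronously, then copy the SG
 * buffers and the reply frame back out to user space.
 */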
1627static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1628{
1629 u32 msg[MAX_MESSAGE_SIZE];
1630 u32* reply = NULL;
1631 u32 size = 0;
1632 u32 reply_size = 0;
1633 u32 __user *user_msg = arg;
1634 u32 __user * user_reply = NULL;
1635 void *sg_list[pHba->sg_tablesize];
1636 u32 sg_offset = 0;
1637 u32 sg_count = 0;
1638 int sg_index = 0;
1639 u32 i = 0;
1640 u32 rcode = 0;
1641 void *p = NULL;
1642 ulong flags = 0;
1643
1644 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1645 // get user msg size in u32s
1646 if(get_user(size, &user_msg[0])){
1647 return -EFAULT;
1648 }
1649 size = size>>16;
1650
1651 user_reply = &user_msg[size];
1652 if(size > MAX_MESSAGE_SIZE){
1653 return -EFAULT;
1654 }
1655 size *= 4; // Convert to bytes
1656
1657 /* Copy in the user's I2O command */
1658 if(copy_from_user(msg, user_msg, size)) {
1659 return -EFAULT;
1660 }
1661 get_user(reply_size, &user_reply[0]);
1662 reply_size = reply_size>>16;
1663 if(reply_size > REPLY_FRAME_SIZE){
1664 reply_size = REPLY_FRAME_SIZE;
1665 }
1666 reply_size *= 4;
 1667 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
 1668 if(reply == NULL) {
1669 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1670 return -ENOMEM;
1671 }
 1672 sg_offset = (msg[0]>>4)&0xf;
1673 msg[2] = 0x40000000; // IOCTL context
1674 msg[3] = (u32)reply;
1675 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1676 if(sg_offset) {
1677 // TODO 64bit fix
1678 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1679 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1680 if (sg_count > pHba->sg_tablesize){
1681 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1682 kfree (reply);
1683 return -EINVAL;
1684 }
1685
1686 for(i = 0; i < sg_count; i++) {
1687 int sg_size;
1688
1689 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1690 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1691 rcode = -EINVAL;
1692 goto cleanup;
1693 }
1694 sg_size = sg[i].flag_count & 0xffffff;
1695 /* Allocate memory for the transfer */
1696 p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1697 if(!p) {
1698 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1699 pHba->name,sg_size,i,sg_count);
1700 rcode = -ENOMEM;
1701 goto cleanup;
1702 }
1703 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1704 /* Copy in the user's SG buffer if necessary */
1705 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1706 // TODO 64bit fix
1707 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1708 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1709 rcode = -EFAULT;
1710 goto cleanup;
1711 }
1712 }
1713 //TODO 64bit fix
1714 sg[i].addr_bus = (u32)virt_to_bus(p);
1715 }
1716 }
1717
1718 do {
1719 if(pHba->host)
1720 spin_lock_irqsave(pHba->host->host_lock, flags);
 1721 // This state stops any new commands from entering the
1722 // controller while processing the ioctl
1723// pHba->state |= DPTI_STATE_IOCTL;
1724// We can't set this now - The scsi subsystem sets host_blocked and
1725// the queue empties and stops. We need a way to restart the queue
1726 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1727 if (rcode != 0)
1728 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1729 rcode, reply);
1730// pHba->state &= ~DPTI_STATE_IOCTL;
1731 if(pHba->host)
1732 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1733 } while(rcode == -ETIMEDOUT);
1734
1735 if(rcode){
1736 goto cleanup;
1737 }
1738
1739 if(sg_offset) {
1740 /* Copy back the Scatter Gather buffers back to user space */
1741 u32 j;
1742 // TODO 64bit fix
1743 struct sg_simple_element* sg;
1744 int sg_size;
1745
1746 // re-acquire the original message to handle correctly the sg copy operation
1747 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1748 // get user msg size in u32s
1749 if(get_user(size, &user_msg[0])){
1750 rcode = -EFAULT;
1751 goto cleanup;
1752 }
1753 size = size>>16;
1754 size *= 4;
1755 /* Copy in the user's I2O command */
1756 if (copy_from_user (msg, user_msg, size)) {
1757 rcode = -EFAULT;
1758 goto cleanup;
1759 }
1760 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1761
1762 // TODO 64bit fix
1763 sg = (struct sg_simple_element*)(msg + sg_offset);
1764 for (j = 0; j < sg_count; j++) {
1765 /* Copy out the SG list to user's buffer if necessary */
1766 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1767 sg_size = sg[j].flag_count & 0xffffff;
1768 // TODO 64bit fix
1769 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1770 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1771 rcode = -EFAULT;
1772 goto cleanup;
1773 }
1774 }
1775 }
1776 }
1777
1778 /* Copy back the reply to user space */
1779 if (reply_size) {
1780 // we wrote our own values for context - now restore the user supplied ones
1781 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1782 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1783 rcode = -EFAULT;
1784 }
1785 if(copy_to_user(user_reply, reply, reply_size)) {
1786 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1787 rcode = -EFAULT;
1788 }
1789 }
1790
1791
1792cleanup:
1793 if (rcode != -ETIME && rcode != -EINTR)
1794 kfree (reply);
1795 while(sg_index) {
1796 if(sg_list[--sg_index]) {
1797 if (rcode != -ETIME && rcode != -EINTR)
1798 kfree(sg_list[sg_index]);
1799 }
1800 }
1801 return rcode;
1802}
1803
1804
1805/*
1806  * This routine returns information about the system. This does not affect
1807 * any logic and if the info is wrong - it doesn't matter.
1808 */
1809
1810/* Get all the info we cannot get from kernel services */
1811static int adpt_system_info(void __user *buffer)
1812{
1813 sysInfo_S si;
1814
1815 memset(&si, 0, sizeof(si));
1816
1817 si.osType = OS_LINUX;
Adrian Bunka4cd16e2005-06-25 14:59:01 -07001818 si.osMajorVersion = 0;
1819 si.osMinorVersion = 0;
1820 si.osRevision = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 si.busType = SI_PCI_BUS;
1822 si.processorFamily = DPTI_sig.dsProcessorFamily;
1823
1824#if defined __i386__
1825 adpt_i386_info(&si);
1826#elif defined (__ia64__)
1827 adpt_ia64_info(&si);
1828#elif defined(__sparc__)
1829 adpt_sparc_info(&si);
1830#elif defined (__alpha__)
1831 adpt_alpha_info(&si);
1832#else
1833 si.processorType = 0xff ;
1834#endif
1835 if(copy_to_user(buffer, &si, sizeof(si))){
1836 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1837 return -EFAULT;
1838 }
1839
1840 return 0;
1841}
1842
1843#if defined __ia64__
1844static void adpt_ia64_info(sysInfo_S* si)
1845{
1846 // This is all the info we need for now
1847 // We will add more info as our new
1848 	// management utility requires it
1849 si->processorType = PROC_IA64;
1850}
1851#endif
1852
1853
1854#if defined __sparc__
1855static void adpt_sparc_info(sysInfo_S* si)
1856{
1857 // This is all the info we need for now
1858 // We will add more info as our new
1859 	// management utility requires it
1860 si->processorType = PROC_ULTRASPARC;
1861}
1862#endif
1863
1864#if defined __alpha__
1865static void adpt_alpha_info(sysInfo_S* si)
1866{
1867 // This is all the info we need for now
1868 // We will add more info as our new
1869 	// management utility requires it
1870 si->processorType = PROC_ALPHA;
1871}
1872#endif
1873
1874#if defined __i386__
1875
1876static void adpt_i386_info(sysInfo_S* si)
1877{
1878 // This is all the info we need for now
1879 // We will add more info as our new
1880 	// management utility requires it
1881 switch (boot_cpu_data.x86) {
1882 case CPU_386:
1883 si->processorType = PROC_386;
1884 break;
1885 case CPU_486:
1886 si->processorType = PROC_486;
1887 break;
1888 case CPU_586:
1889 si->processorType = PROC_PENTIUM;
1890 break;
1891 default: // Just in case
1892 si->processorType = PROC_PENTIUM;
1893 break;
1894 }
1895}
1896
1897#endif
1898
1899
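/*
 * Character device ioctl entry point.  Maps the minor number to an
 * adapter, waits for any reset in progress to finish, then dispatches
 * DPT_SIGNATURE, I2OUSRCMD, DPT_CTRLINFO, DPT_SYSINFO, DPT_BLINKLED,
 * I2ORESETCMD and I2ORESCANCMD requests.
 */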
1900static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1901 ulong arg)
1902{
1903 int minor;
1904 int error = 0;
1905 adpt_hba* pHba;
1906 ulong flags = 0;
1907 void __user *argp = (void __user *)arg;
1908
1909 minor = iminor(inode);
1910 if (minor >= DPTI_MAX_HBA){
1911 return -ENXIO;
1912 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001913 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1915 if (pHba->unit == minor) {
1916 break; /* found adapter */
1917 }
1918 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001919 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 if(pHba == NULL){
1921 return -ENXIO;
1922 }
1923
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001924 while((volatile u32) pHba->state & DPTI_STATE_RESET )
1925 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927 switch (cmd) {
1928 // TODO: handle 3 cases
1929 case DPT_SIGNATURE:
1930 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1931 return -EFAULT;
1932 }
1933 break;
1934 case I2OUSRCMD:
1935 return adpt_i2o_passthru(pHba, argp);
1936
1937 case DPT_CTRLINFO:{
1938 drvrHBAinfo_S HbaInfo;
1939
1940#define FLG_OSD_PCI_VALID 0x0001
1941#define FLG_OSD_DMA 0x0002
1942#define FLG_OSD_I2O 0x0004
1943 memset(&HbaInfo, 0, sizeof(HbaInfo));
1944 HbaInfo.drvrHBAnum = pHba->unit;
1945 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1946 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1947 HbaInfo.pciBusNum = pHba->pDev->bus->number;
1948 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1949 HbaInfo.Interrupt = pHba->pDev->irq;
1950 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1951 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1952 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1953 return -EFAULT;
1954 }
1955 break;
1956 }
1957 case DPT_SYSINFO:
1958 return adpt_system_info(argp);
1959 case DPT_BLINKLED:{
1960 u32 value;
1961 value = (u32)adpt_read_blink_led(pHba);
1962 if (copy_to_user(argp, &value, sizeof(value))) {
1963 return -EFAULT;
1964 }
1965 break;
1966 }
1967 case I2ORESETCMD:
1968 if(pHba->host)
1969 spin_lock_irqsave(pHba->host->host_lock, flags);
1970 adpt_hba_reset(pHba);
1971 if(pHba->host)
1972 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1973 break;
1974 case I2ORESCANCMD:
1975 adpt_rescan(pHba);
1976 break;
1977 default:
1978 return -EINVAL;
1979 }
1980
1981 return error;
1982}
1983
1984
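/*
 * Interrupt handler.  Drains the reply FIFO while the controller has
 * an interrupt pending: failed frames are returned with a NOP, IOCTL
 * replies are copied back to the waiter's buffer, post-wait contexts
 * wake the waiting thread, and anything else is treated as a SCSI
 * command completion.
 */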
David Howells7d12e782006-10-05 14:55:46 +01001985static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
1987 struct scsi_cmnd* cmd;
1988 adpt_hba* pHba = dev_id;
1989 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001990 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 u32 status=0;
1992 u32 context;
1993 ulong flags = 0;
1994 int handled = 0;
1995
1996 if (pHba == NULL){
1997 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1998 return IRQ_NONE;
1999 }
2000 if(pHba->host)
2001 spin_lock_irqsave(pHba->host->host_lock, flags);
2002
2003 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2004 m = readl(pHba->reply_port);
2005 if(m == EMPTY_QUEUE){
2006 // Try twice then give up
2007 rmb();
2008 m = readl(pHba->reply_port);
2009 if(m == EMPTY_QUEUE){
2010 // This really should not happen
2011 printk(KERN_ERR"dpti: Could not get reply frame\n");
2012 goto out;
2013 }
2014 }
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002015 reply = bus_to_virt(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017 if (readl(reply) & MSG_FAIL) {
2018 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002019 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 u32 old_context;
2021 PDEBUG("%s: Failed message\n",pHba->name);
2022 if(old_m >= 0x100000){
2023 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2024 writel(m,pHba->reply_port);
2025 continue;
2026 }
2027 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002028 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 old_context = readl(msg+12);
2030 writel(old_context, reply+12);
2031 adpt_send_nop(pHba, old_m);
2032 }
2033 context = readl(reply+8);
2034 if(context & 0x40000000){ // IOCTL
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002035 void *p = (void *)readl(reply+12);
2036 if( p != NULL) {
2037 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 }
2039 // All IOCTLs will also be post wait
2040 }
2041 if(context & 0x80000000){ // Post wait message
2042 status = readl(reply+16);
2043 if(status >> 24){
2044 status &= 0xffff; /* Get detail status */
2045 } else {
2046 status = I2O_POST_WAIT_OK;
2047 }
2048 if(!(context & 0x40000000)) {
2049 cmd = (struct scsi_cmnd*) readl(reply+12);
2050 if(cmd != NULL) {
2051 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2052 }
2053 }
2054 adpt_i2o_post_wait_complete(context, status);
2055 } else { // SCSI message
2056 cmd = (struct scsi_cmnd*) readl(reply+12);
2057 if(cmd != NULL){
2058 if(cmd->serial_number != 0) { // If not timedout
2059 adpt_i2o_to_scsi(reply, cmd);
2060 }
2061 }
2062 }
2063 writel(m, pHba->reply_port);
2064 wmb();
2065 rmb();
2066 }
2067 handled = 1;
2068out: if(pHba->host)
2069 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2070 return IRQ_RETVAL(handled);
2071}
2072
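/*
 * Build an I2O_CMD_SCSI_EXEC message for a SCSI command: set the
 * transfer direction flags, copy the CDB into the frame, map the
 * scatter/gather list with scsi_dma_map() and post the frame with
 * adpt_i2o_post_this().
 */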
2073static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2074{
2075 int i;
2076 u32 msg[MAX_MESSAGE_SIZE];
2077 u32* mptr;
2078 u32 *lenptr;
2079 int direction;
2080 int scsidir;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002081 int nseg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 u32 len;
2083 u32 reqlen;
2084 s32 rcode;
2085
2086 memset(msg, 0 , sizeof(msg));
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002087 len = scsi_bufflen(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 direction = 0x00000000;
2089
2090 scsidir = 0x00000000; // DATA NO XFER
2091 if(len) {
2092 /*
2093 * Set SCBFlags to indicate if data is being transferred
2094 * in or out, or no data transfer
2095 * Note: Do not have to verify index is less than 0 since
2096 * cmd->cmnd[0] is an unsigned char
2097 */
2098 switch(cmd->sc_data_direction){
2099 case DMA_FROM_DEVICE:
2100 scsidir =0x40000000; // DATA IN (iop<--dev)
2101 break;
2102 case DMA_TO_DEVICE:
2103 direction=0x04000000; // SGL OUT
2104 scsidir =0x80000000; // DATA OUT (iop-->dev)
2105 break;
2106 case DMA_NONE:
2107 break;
2108 case DMA_BIDIRECTIONAL:
2109 scsidir =0x40000000; // DATA IN (iop<--dev)
2110 // Assume In - and continue;
2111 break;
2112 default:
2113 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2114 pHba->name, cmd->cmnd[0]);
2115 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2116 cmd->scsi_done(cmd);
2117 return 0;
2118 }
2119 }
2120 // msg[0] is set later
2121 // I2O_CMD_SCSI_EXEC
2122 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2123 msg[2] = 0;
2124 msg[3] = (u32)cmd; /* We want the SCSI control block back */
2125 // Our cards use the transaction context as the tag for queueing
2126 // Adaptec/DPT Private stuff
2127 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2128 msg[5] = d->tid;
2129 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2130 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2131 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2132 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2133 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2134
2135 mptr=msg+7;
2136
2137 // Write SCSI command into the message - always 16 byte block
2138 memset(mptr, 0, 16);
2139 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2140 mptr+=4;
2141 lenptr=mptr++; /* Remember me - fill in when we know */
2142 reqlen = 14; // SINGLE SGE
2143 /* Now fill in the SGList and command */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002145 nseg = scsi_dma_map(cmd);
2146 BUG_ON(nseg < 0);
2147 if (nseg) {
2148 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 len = 0;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002151 scsi_for_each_sg(cmd, sg, nseg, i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2153 len+=sg_dma_len(sg);
2154 *mptr++ = sg_dma_address(sg);
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002155 /* Make this an end of list */
2156 if (i == nseg - 1)
2157 mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 reqlen = mptr - msg;
2160 *lenptr = len;
2161
2162 if(cmd->underflow && len != cmd->underflow){
2163 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2164 len, cmd->underflow);
2165 }
2166 } else {
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002167 *lenptr = len = 0;
2168 reqlen = 12;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 }
2170
2171 /* Stick the headers on */
2172 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2173
2174 	// Send it on its way
2175 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2176 if (rcode == 0) {
2177 return 0;
2178 }
2179 return rcode;
2180}
2181
2182
2183static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2184{
2185 struct Scsi_Host *host = NULL;
2186
2187 host = scsi_register(sht, sizeof(adpt_hba*));
2188 if (host == NULL) {
2189 printk ("%s: scsi_register returned NULL\n",pHba->name);
2190 return -1;
2191 }
2192 host->hostdata[0] = (unsigned long)pHba;
2193 pHba->host = host;
2194
2195 host->irq = pHba->pDev->irq;
2196 /* no IO ports, so don't have to set host->io_port and
2197 * host->n_io_port
2198 */
2199 host->io_port = 0;
2200 host->n_io_port = 0;
Hennede77aaf2006-10-04 10:22:09 +02002201 /* see comments in scsi_host.h */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 host->max_id = 16;
2203 host->max_lun = 256;
2204 host->max_channel = pHba->top_scsi_channel + 1;
2205 host->cmd_per_lun = 1;
2206 host->unique_id = (uint) pHba;
2207 host->sg_tablesize = pHba->sg_tablesize;
2208 host->can_queue = pHba->post_fifo_size;
2209
2210 return 0;
2211}
2212
2213
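/*
 * Translate an I2O reply frame into a SCSI result: map the detailed
 * status onto a DID_* code, copy back sense data on CHECK CONDITION,
 * set the residual byte count and complete the command.
 */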
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002214static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215{
2216 adpt_hba* pHba;
2217 u32 hba_status;
2218 u32 dev_status;
2219 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2220 // I know this would look cleaner if I just read bytes
2221 // but the model I have been using for all the rest of the
2222 // io is in 4 byte words - so I keep that model
2223 u16 detailed_status = readl(reply+16) &0xffff;
2224 dev_status = (detailed_status & 0xff);
2225 hba_status = detailed_status >> 8;
2226
2227 // calculate resid for sg
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002228 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
2230 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2231
2232 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2233
2234 if(!(reply_flags & MSG_FAIL)) {
2235 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2236 case I2O_SCSI_DSC_SUCCESS:
2237 cmd->result = (DID_OK << 16);
2238 // handle underflow
2239 if(readl(reply+5) < cmd->underflow ) {
2240 cmd->result = (DID_ERROR <<16);
2241 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2242 }
2243 break;
2244 case I2O_SCSI_DSC_REQUEST_ABORTED:
2245 cmd->result = (DID_ABORT << 16);
2246 break;
2247 case I2O_SCSI_DSC_PATH_INVALID:
2248 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2249 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2250 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2251 case I2O_SCSI_DSC_NO_ADAPTER:
2252 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2253 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2254 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2255 cmd->result = (DID_TIME_OUT << 16);
2256 break;
2257 case I2O_SCSI_DSC_ADAPTER_BUSY:
2258 case I2O_SCSI_DSC_BUS_BUSY:
2259 cmd->result = (DID_BUS_BUSY << 16);
2260 break;
2261 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2262 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2263 cmd->result = (DID_RESET << 16);
2264 break;
2265 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2266 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2267 cmd->result = (DID_PARITY << 16);
2268 break;
2269 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2270 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2271 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2272 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2273 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2274 case I2O_SCSI_DSC_DATA_OVERRUN:
2275 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2276 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2277 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2278 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2279 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2280 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2281 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2282 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2283 case I2O_SCSI_DSC_INVALID_CDB:
2284 case I2O_SCSI_DSC_LUN_INVALID:
2285 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2286 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2287 case I2O_SCSI_DSC_NO_NEXUS:
2288 case I2O_SCSI_DSC_CDB_RECEIVED:
2289 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2290 case I2O_SCSI_DSC_QUEUE_FROZEN:
2291 case I2O_SCSI_DSC_REQUEST_INVALID:
2292 default:
2293 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2294 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2295 hba_status, dev_status, cmd->cmnd[0]);
2296 cmd->result = (DID_ERROR << 16);
2297 break;
2298 }
2299
2300 // copy over the request sense data if it was a check
2301 // condition status
2302 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2303 u32 len = sizeof(cmd->sense_buffer);
2304 len = (len > 40) ? 40 : len;
2305 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002306 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2308 cmd->sense_buffer[2] == DATA_PROTECT ){
2309 				/* This is to handle a failed array */
2310 cmd->result = (DID_TIME_OUT << 16);
2311 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2312 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2313 hba_status, dev_status, cmd->cmnd[0]);
2314
2315 }
2316 }
2317 } else {
2318 		/* In this condition we could not talk to the tid;
2319 		 * the card rejected it. We should signal a retry
2320 		 * for a limited number of retries.
2321 */
2322 cmd->result = (DID_TIME_OUT << 16);
2323 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2324 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2325 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2326 }
2327
2328 cmd->result |= (dev_status);
2329
2330 if(cmd->scsi_done != NULL){
2331 cmd->scsi_done(cmd);
2332 }
2333 return cmd->result;
2334}
2335
2336
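/*
 * Re-read the LCT under the host lock and reconcile it with the
 * devices we already know about.
 */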
2337static s32 adpt_rescan(adpt_hba* pHba)
2338{
2339 s32 rcode;
2340 ulong flags = 0;
2341
2342 if(pHba->host)
2343 spin_lock_irqsave(pHba->host->host_lock, flags);
2344 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2345 goto out;
2346 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2347 goto out;
2348 rcode = 0;
2349out: if(pHba->host)
2350 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2351 return rcode;
2352}
2353
2354
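/*
 * Walk a freshly read LCT: mark every known device unscanned, add any
 * new block/SCSI/FC devices, bring back online devices that are still
 * present, and offline devices that no longer appear in the table.
 */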
2355static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2356{
2357 int i;
2358 int max;
2359 int tid;
2360 struct i2o_device *d;
2361 i2o_lct *lct = pHba->lct;
2362 u8 bus_no = 0;
2363 s16 scsi_id;
2364 s16 scsi_lun;
2365 u32 buf[10]; // at least 8 u32's
2366 struct adpt_device* pDev = NULL;
2367 struct i2o_device* pI2o_dev = NULL;
2368
2369 if (lct == NULL) {
2370 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2371 return -1;
2372 }
2373
2374 max = lct->table_size;
2375 max -= 3;
2376 max /= 9;
2377
2378 // Mark each drive as unscanned
2379 for (d = pHba->devices; d; d = d->next) {
2380 pDev =(struct adpt_device*) d->owner;
2381 if(!pDev){
2382 continue;
2383 }
2384 pDev->state |= DPTI_DEV_UNSCANNED;
2385 }
2386
2387 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2388
2389 for(i=0;i<max;i++) {
2390 if( lct->lct_entry[i].user_tid != 0xfff){
2391 continue;
2392 }
2393
2394 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2395 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2396 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2397 tid = lct->lct_entry[i].tid;
2398 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2399 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2400 continue;
2401 }
2402 bus_no = buf[0]>>16;
2403 scsi_id = buf[1];
2404 scsi_lun = (buf[2]>>8 )&0xff;
2405 pDev = pHba->channel[bus_no].device[scsi_id];
2406 			/* walk the LUN chain to find the matching LUN */
2407 while(pDev) {
2408 if(pDev->scsi_lun == scsi_lun) {
2409 break;
2410 }
2411 pDev = pDev->next_lun;
2412 }
2413 if(!pDev ) { // Something new add it
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002414 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 if(d==NULL)
2416 {
2417 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2418 return -ENOMEM;
2419 }
2420
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002421 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 d->next = NULL;
2423
2424 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2425
2426 d->flags = 0;
2427 adpt_i2o_report_hba_unit(pHba, d);
2428 adpt_i2o_install_device(pHba, d);
2429
2430 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2431 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2432 continue;
2433 }
2434 pDev = pHba->channel[bus_no].device[scsi_id];
2435 if( pDev == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302436 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 if(pDev == NULL) {
2438 return -ENOMEM;
2439 }
2440 pHba->channel[bus_no].device[scsi_id] = pDev;
2441 } else {
2442 while (pDev->next_lun) {
2443 pDev = pDev->next_lun;
2444 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302445 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 if(pDev == NULL) {
2447 return -ENOMEM;
2448 }
2449 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 pDev->tid = d->lct_data.tid;
2451 pDev->scsi_channel = bus_no;
2452 pDev->scsi_id = scsi_id;
2453 pDev->scsi_lun = scsi_lun;
2454 pDev->pI2o_dev = d;
2455 d->owner = pDev;
2456 pDev->type = (buf[0])&0xff;
2457 pDev->flags = (buf[0]>>8)&0xff;
2458 			// Too late, SCSI system has made up its mind, but what the hey ...
2459 if(scsi_id > pHba->top_scsi_id){
2460 pHba->top_scsi_id = scsi_id;
2461 }
2462 if(scsi_lun > pHba->top_scsi_lun){
2463 pHba->top_scsi_lun = scsi_lun;
2464 }
2465 continue;
2466 } // end of new i2o device
2467
2468 // We found an old device - check it
2469 while(pDev) {
2470 if(pDev->scsi_lun == scsi_lun) {
2471 if(!scsi_device_online(pDev->pScsi_dev)) {
2472 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2473 pHba->name,bus_no,scsi_id,scsi_lun);
2474 if (pDev->pScsi_dev) {
2475 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2476 }
2477 }
2478 d = pDev->pI2o_dev;
2479 if(d->lct_data.tid != tid) { // something changed
2480 pDev->tid = tid;
2481 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2482 if (pDev->pScsi_dev) {
2483 pDev->pScsi_dev->changed = TRUE;
2484 pDev->pScsi_dev->removable = TRUE;
2485 }
2486 }
2487 // Found it - mark it scanned
2488 pDev->state = DPTI_DEV_ONLINE;
2489 break;
2490 }
2491 pDev = pDev->next_lun;
2492 }
2493 }
2494 }
2495 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2496 pDev =(struct adpt_device*) pI2o_dev->owner;
2497 if(!pDev){
2498 continue;
2499 }
2500 		// Take offline any drives that previously existed but could not be found
2501 // in the LCT table
2502 if (pDev->state & DPTI_DEV_UNSCANNED){
2503 pDev->state = DPTI_DEV_OFFLINE;
2504 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2505 if (pDev->pScsi_dev) {
2506 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2507 }
2508 }
2509 }
2510 return 0;
2511}
2512
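/*
 * Complete every command still outstanding on each device with a
 * QUEUE_FULL status so the SCSI midlayer will retry it.
 */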
2513static void adpt_fail_posted_scbs(adpt_hba* pHba)
2514{
2515 struct scsi_cmnd* cmd = NULL;
2516 struct scsi_device* d = NULL;
2517
2518 shost_for_each_device(d, pHba->host) {
2519 unsigned long flags;
2520 spin_lock_irqsave(&d->list_lock, flags);
2521 list_for_each_entry(cmd, &d->cmd_list, list) {
2522 if(cmd->serial_number == 0){
2523 continue;
2524 }
2525 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2526 cmd->scsi_done(cmd);
2527 }
2528 spin_unlock_irqrestore(&d->list_lock, flags);
2529 }
2530}
2531
2532
2533/*============================================================================
2534 * Routines from i2o subsystem
2535 *============================================================================
2536 */
2537
2538
2539
2540/*
2541 * Bring an I2O controller into HOLD state. See the spec.
2542 */
2543static int adpt_i2o_activate_hba(adpt_hba* pHba)
2544{
2545 int rcode;
2546
2547 if(pHba->initialized ) {
2548 if (adpt_i2o_status_get(pHba) < 0) {
2549 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2550 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2551 return rcode;
2552 }
2553 if (adpt_i2o_status_get(pHba) < 0) {
2554 printk(KERN_INFO "HBA not responding.\n");
2555 return -1;
2556 }
2557 }
2558
2559 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2560 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2561 return -1;
2562 }
2563
2564 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2565 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2566 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2567 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2568 adpt_i2o_reset_hba(pHba);
2569 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2570 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2571 return -1;
2572 }
2573 }
2574 } else {
2575 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2576 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2577 return rcode;
2578 }
2579
2580 }
2581
2582 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2583 return -1;
2584 }
2585
2586 /* In HOLD state */
2587
2588 if (adpt_i2o_hrt_get(pHba) < 0) {
2589 return -1;
2590 }
2591
2592 return 0;
2593}
2594
2595/*
2596 * Bring a controller online into OPERATIONAL state.
2597 */
2598
2599static int adpt_i2o_online_hba(adpt_hba* pHba)
2600{
2601 if (adpt_i2o_systab_send(pHba) < 0) {
2602 adpt_i2o_delete_hba(pHba);
2603 return -1;
2604 }
2605 /* In READY state */
2606
2607 if (adpt_i2o_enable_hba(pHba) < 0) {
2608 adpt_i2o_delete_hba(pHba);
2609 return -1;
2610 }
2611
2612 /* In OPERATIONAL state */
2613 return 0;
2614}
2615
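/*
 * Return a message frame to the IOP by filling it with UTIL_NOP and
 * posting it; if no frame was supplied (m == EMPTY_QUEUE), poll for
 * one for up to five seconds.
 */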
2616static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2617{
2618 u32 __iomem *msg;
2619 ulong timeout = jiffies + 5*HZ;
2620
2621 while(m == EMPTY_QUEUE){
2622 rmb();
2623 m = readl(pHba->post_port);
2624 if(m != EMPTY_QUEUE){
2625 break;
2626 }
2627 if(time_after(jiffies,timeout)){
2628 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2629 return 2;
2630 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002631 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 }
2633 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2634 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2635 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2636 writel( 0,&msg[2]);
2637 wmb();
2638
2639 writel(m, pHba->post_port);
2640 wmb();
2641 return 0;
2642}
2643
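/*
 * Send I2O_CMD_OUTBOUND_INIT to set up the outbound (reply) queue,
 * then allocate the reply pool and prime the reply FIFO with the bus
 * addresses of its frames.
 */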
2644static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2645{
2646 u8 *status;
2647 u32 __iomem *msg = NULL;
2648 int i;
2649 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2650 u32* ptr;
2651 u32 outbound_frame; // This had to be a 32 bit address
2652 u32 m;
2653
2654 do {
2655 rmb();
2656 m = readl(pHba->post_port);
2657 if (m != EMPTY_QUEUE) {
2658 break;
2659 }
2660
2661 if(time_after(jiffies,timeout)){
2662 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2663 return -ETIMEDOUT;
2664 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002665 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 } while(m == EMPTY_QUEUE);
2667
2668 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2669
2670 status = kmalloc(4,GFP_KERNEL|ADDR32);
2671 if (status==NULL) {
2672 adpt_send_nop(pHba, m);
2673 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2674 pHba->name);
2675 return -ENOMEM;
2676 }
2677 memset(status, 0, 4);
2678
2679 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2680 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2681 writel(0, &msg[2]);
2682 writel(0x0106, &msg[3]); /* Transaction context */
2683 writel(4096, &msg[4]); /* Host page frame size */
2684 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2685 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2686 writel(virt_to_bus(status), &msg[7]);
2687
2688 writel(m, pHba->post_port);
2689 wmb();
2690
2691 // Wait for the reply status to come back
2692 do {
2693 if (*status) {
2694 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2695 break;
2696 }
2697 }
2698 rmb();
2699 if(time_after(jiffies,timeout)){
2700 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2701 return -ETIMEDOUT;
2702 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002703 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 } while (1);
2705
2706 // If the command was successful, fill the fifo with our reply
2707 // message packets
2708 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002709 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 return -2;
2711 }
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002712 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002714 kfree(pHba->reply_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002716 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 if(!pHba->reply_pool){
2718 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2719 return -1;
2720 }
2721 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2722
2723 ptr = pHba->reply_pool;
2724 for(i = 0; i < pHba->reply_fifo_size; i++) {
2725 outbound_frame = (u32)virt_to_bus(ptr);
2726 writel(outbound_frame, pHba->reply_port);
2727 wmb();
2728 ptr += REPLY_FRAME_SIZE;
2729 }
2730 adpt_i2o_status_get(pHba);
2731 return 0;
2732}
2733
2734
2735/*
2736 * I2O System Table. Contains information about
2737 * all the IOPs in the system. Used to inform IOPs
2738 * about each other's existence.
2739 *
2740 * sys_tbl_ver is the CurrentChangeIndicator that is
2741 * used by IOPs to track changes.
2742 */
2743
2744
2745
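/*
 * Issue I2O_CMD_STATUS_GET and poll until the status block is filled
 * in, then derive the post/reply FIFO sizes and the scatter/gather
 * table size from it.
 */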
2746static s32 adpt_i2o_status_get(adpt_hba* pHba)
2747{
2748 ulong timeout;
2749 u32 m;
2750 u32 __iomem *msg;
2751 u8 *status_block=NULL;
2752 ulong status_block_bus;
2753
2754 if(pHba->status_block == NULL) {
2755 pHba->status_block = (i2o_status_block*)
2756 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2757 if(pHba->status_block == NULL) {
2758 printk(KERN_ERR
2759 "dpti%d: Get Status Block failed; Out of memory. \n",
2760 pHba->unit);
2761 return -ENOMEM;
2762 }
2763 }
2764 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2765 status_block = (u8*)(pHba->status_block);
2766 status_block_bus = virt_to_bus(pHba->status_block);
2767 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2768 do {
2769 rmb();
2770 m = readl(pHba->post_port);
2771 if (m != EMPTY_QUEUE) {
2772 break;
2773 }
2774 if(time_after(jiffies,timeout)){
2775 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2776 pHba->name);
2777 return -ETIMEDOUT;
2778 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002779 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 } while(m==EMPTY_QUEUE);
2781
2782
2783 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2784
2785 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2786 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2787 writel(1, &msg[2]);
2788 writel(0, &msg[3]);
2789 writel(0, &msg[4]);
2790 writel(0, &msg[5]);
2791 writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2792 writel(0, &msg[7]);
2793 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2794
2795 //post message
2796 writel(m, pHba->post_port);
2797 wmb();
2798
2799 while(status_block[87]!=0xff){
2800 if(time_after(jiffies,timeout)){
2801 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2802 pHba->unit);
2803 return -ETIMEDOUT;
2804 }
2805 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002806 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 }
2808
2809 // Set up our number of outbound and inbound messages
2810 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2811 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2812 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2813 }
2814
2815 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2816 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2817 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2818 }
2819
2820 // Calculate the Scatter Gather list size
2821 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2822 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2823 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2824 }
2825
2826
2827#ifdef DEBUG
2828 printk("dpti%d: State = ",pHba->unit);
2829 switch(pHba->status_block->iop_state) {
2830 case 0x01:
2831 printk("INIT\n");
2832 break;
2833 case 0x02:
2834 printk("RESET\n");
2835 break;
2836 case 0x04:
2837 printk("HOLD\n");
2838 break;
2839 case 0x05:
2840 printk("READY\n");
2841 break;
2842 case 0x08:
2843 printk("OPERATIONAL\n");
2844 break;
2845 case 0x10:
2846 printk("FAILED\n");
2847 break;
2848 case 0x11:
2849 printk("FAULTED\n");
2850 break;
2851 default:
2852 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2853 }
2854#endif
2855 return 0;
2856}
2857
2858/*
2859 * Get the IOP's Logical Configuration Table
2860 */
2861static int adpt_i2o_lct_get(adpt_hba* pHba)
2862{
2863 u32 msg[8];
2864 int ret;
2865 u32 buf[16];
2866
2867 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2868 pHba->lct_size = pHba->status_block->expected_lct_size;
2869 }
2870 do {
2871 if (pHba->lct == NULL) {
2872 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2873 if(pHba->lct == NULL) {
2874 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2875 pHba->name);
2876 return -ENOMEM;
2877 }
2878 }
2879 memset(pHba->lct, 0, pHba->lct_size);
2880
2881 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2882 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2883 msg[2] = 0;
2884 msg[3] = 0;
2885 msg[4] = 0xFFFFFFFF; /* All devices */
2886 msg[5] = 0x00000000; /* Report now */
2887 msg[6] = 0xD0000000|pHba->lct_size;
2888 msg[7] = virt_to_bus(pHba->lct);
2889
2890 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2891 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2892 pHba->name, ret);
2893 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2894 return ret;
2895 }
2896
2897 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2898 pHba->lct_size = pHba->lct->table_size << 2;
2899 kfree(pHba->lct);
2900 pHba->lct = NULL;
2901 }
2902 } while (pHba->lct == NULL);
2903
2904 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2905
2906
2907 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2908 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2909 pHba->FwDebugBufferSize = buf[1];
2910 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
2911 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2912 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2913 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
2914 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2915 pHba->FwDebugBuffer_P += buf[2];
2916 pHba->FwDebugFlags = 0;
2917 }
2918
2919 return 0;
2920}
2921
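/*
 * Rebuild the I2O system table from the current adapter chain, one
 * entry per controller, using a freshly read status block for each.
 */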
2922static int adpt_i2o_build_sys_table(void)
2923{
2924 adpt_hba* pHba = NULL;
2925 int count = 0;
2926
2927 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2928 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2929
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002930 kfree(sys_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2933 if(!sys_tbl) {
2934 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2935 return -ENOMEM;
2936 }
2937 memset(sys_tbl, 0, sys_tbl_len);
2938
2939 sys_tbl->num_entries = hba_count;
2940 sys_tbl->version = I2OVERSION;
2941 sys_tbl->change_ind = sys_tbl_ind++;
2942
2943 for(pHba = hba_chain; pHba; pHba = pHba->next) {
2944 // Get updated Status Block so we have the latest information
2945 if (adpt_i2o_status_get(pHba)) {
2946 sys_tbl->num_entries--;
2947 continue; // try next one
2948 }
2949
2950 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2951 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2952 sys_tbl->iops[count].seg_num = 0;
2953 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2954 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2955 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2956 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2957 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2958 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002959 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2960 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
2962 count++;
2963 }
2964
2965#ifdef DEBUG
2966{
2967 u32 *table = (u32*)sys_tbl;
2968 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2969 for(count = 0; count < (sys_tbl_len >>2); count++) {
2970 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2971 count, table[count]);
2972 }
2973}
2974#endif
2975
2976 return 0;
2977}
2978
2979
2980/*
2981 * Dump the information block associated with a given unit (TID)
2982 */
2983
2984static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2985{
2986 char buf[64];
2987 int unit = d->lct_data.tid;
2988
2989 printk(KERN_INFO "TID %3.3d ", unit);
2990
2991 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
2992 {
2993 buf[16]=0;
2994 printk(" Vendor: %-12.12s", buf);
2995 }
2996 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
2997 {
2998 buf[16]=0;
2999 printk(" Device: %-12.12s", buf);
3000 }
3001 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3002 {
3003 buf[8]=0;
3004 printk(" Rev: %-12.12s\n", buf);
3005 }
3006#ifdef DEBUG
3007 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3008 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3009 printk(KERN_INFO "\tFlags: ");
3010
3011 if(d->lct_data.device_flags&(1<<0))
3012 printk("C"); // ConfigDialog requested
3013 if(d->lct_data.device_flags&(1<<1))
3014 printk("U"); // Multi-user capable
3015 if(!(d->lct_data.device_flags&(1<<4)))
3016 printk("P"); // Peer service enabled!
3017 if(!(d->lct_data.device_flags&(1<<5)))
3018 printk("M"); // Mgmt service enabled!
3019 printk("\n");
3020#endif
3021}
3022
3023#ifdef DEBUG
3024/*
3025 * Do i2o class name lookup
3026 */
3027static const char *adpt_i2o_get_class_name(int class)
3028{
3029 int idx = 16;
3030 static char *i2o_class_name[] = {
3031 "Executive",
3032 "Device Driver Module",
3033 "Block Device",
3034 "Tape Device",
3035 "LAN Interface",
3036 "WAN Interface",
3037 "Fibre Channel Port",
3038 "Fibre Channel Device",
3039 "SCSI Device",
3040 "ATE Port",
3041 "ATE Device",
3042 "Floppy Controller",
3043 "Floppy Device",
3044 "Secondary Bus Port",
3045 "Peer Transport Agent",
3046 "Peer Transport",
3047 "Unknown"
3048 };
3049
3050 switch(class&0xFFF) {
3051 case I2O_CLASS_EXECUTIVE:
3052 idx = 0; break;
3053 case I2O_CLASS_DDM:
3054 idx = 1; break;
3055 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3056 idx = 2; break;
3057 case I2O_CLASS_SEQUENTIAL_STORAGE:
3058 idx = 3; break;
3059 case I2O_CLASS_LAN:
3060 idx = 4; break;
3061 case I2O_CLASS_WAN:
3062 idx = 5; break;
3063 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3064 idx = 6; break;
3065 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3066 idx = 7; break;
3067 case I2O_CLASS_SCSI_PERIPHERAL:
3068 idx = 8; break;
3069 case I2O_CLASS_ATE_PORT:
3070 idx = 9; break;
3071 case I2O_CLASS_ATE_PERIPHERAL:
3072 idx = 10; break;
3073 case I2O_CLASS_FLOPPY_CONTROLLER:
3074 idx = 11; break;
3075 case I2O_CLASS_FLOPPY_DEVICE:
3076 idx = 12; break;
3077 case I2O_CLASS_BUS_ADAPTER_PORT:
3078 idx = 13; break;
3079 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3080 idx = 14; break;
3081 case I2O_CLASS_PEER_TRANSPORT:
3082 idx = 15; break;
3083 }
3084 return i2o_class_name[idx];
3085}
3086#endif
3087
3088
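/*
 * Fetch the Hardware Resource Table, growing the buffer and retrying
 * if the controller reports more entries than fit.
 */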
3089static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3090{
3091 u32 msg[6];
3092 int ret, size = sizeof(i2o_hrt);
3093
3094 do {
3095 if (pHba->hrt == NULL) {
3096 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3097 if (pHba->hrt == NULL) {
3098 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3099 return -ENOMEM;
3100 }
3101 }
3102
3103 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3104 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3105 msg[2]= 0;
3106 msg[3]= 0;
3107 msg[4]= (0xD0000000 | size); /* Simple transaction */
3108 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3109
3110 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3111 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3112 return ret;
3113 }
3114
3115 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3116 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3117 kfree(pHba->hrt);
3118 pHba->hrt = NULL;
3119 }
3120 } while(pHba->hrt == NULL);
3121 return 0;
3122}
3123
3124/*
3125 * Query one scalar group value or a whole scalar group.
3126 */
3127static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3128 int group, int field, void *buf, int buflen)
3129{
3130 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3131 u8 *resblk;
3132
3133 int size;
3134
3135 /* 8 bytes for header */
3136 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3137 if (resblk == NULL) {
3138 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3139 return -ENOMEM;
3140 }
3141
3142 if (field == -1) /* whole group */
3143 opblk[4] = -1;
3144
3145 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3146 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3147 if (size == -ETIME) {
3148 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3149 return -ETIME;
3150 } else if (size == -EINTR) {
3151 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3152 return -EINTR;
3153 }
3154
3155 memcpy(buf, resblk+8, buflen); /* cut off header */
3156
3157 kfree(resblk);
3158 if (size < 0)
3159 return size;
3160
3161 return buflen;
3162}
3163
3164
3165/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3166 *
3167 * This function can be used for all UtilParamsGet/Set operations.
3168 * The OperationBlock is given in opblk-buffer,
3169 * and results are returned in resblk-buffer.
3170 * Note that the minimum sized resblk is 8 bytes and contains
3171 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3172 */
3173static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3174 void *opblk, int oplen, void *resblk, int reslen)
3175{
3176 u32 msg[9];
3177 u32 *res = (u32 *)resblk;
3178 int wait_status;
3179
3180 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3181 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3182 msg[2] = 0;
3183 msg[3] = 0;
3184 msg[4] = 0;
3185 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3186 msg[6] = virt_to_bus(opblk);
3187 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3188 msg[8] = virt_to_bus(resblk);
3189
3190 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3191 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3192 return wait_status; /* -DetailedStatus */
3193 }
3194
3195 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3196 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3197 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3198 pHba->name,
3199 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3200 : "PARAMS_GET",
3201 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3202 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3203 }
3204
3205 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3206}
3207
3208
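/*
 * Ask the IOP to quiesce with I2O_CMD_SYS_QUIESCE; skipped unless the
 * IOP is in READY or OPERATIONAL state.
 */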
3209static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3210{
3211 u32 msg[4];
3212 int ret;
3213
3214 adpt_i2o_status_get(pHba);
3215
3216 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3217
3218 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3219 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3220 return 0;
3221 }
3222
3223 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3224 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3225 msg[2] = 0;
3226 msg[3] = 0;
3227
3228 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3229 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3230 pHba->unit, -ret);
3231 } else {
3232 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3233 }
3234
3235 adpt_i2o_status_get(pHba);
3236 return ret;
3237}
3238
3239
3240/*
3241 * Enable IOP. Allows the IOP to resume external operations.
3242 */
3243static int adpt_i2o_enable_hba(adpt_hba* pHba)
3244{
3245 u32 msg[4];
3246 int ret;
3247
3248 adpt_i2o_status_get(pHba);
3249 if(!pHba->status_block){
3250 return -ENOMEM;
3251 }
3252 /* Enable only allowed on READY state */
3253 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3254 return 0;
3255
3256 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3257 return -EINVAL;
3258
3259 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3260 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3261 msg[2]= 0;
3262 msg[3]= 0;
3263
3264 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3265 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3266 pHba->name, ret);
3267 } else {
3268 PDEBUG("%s: Enabled.\n", pHba->name);
3269 }
3270
3271 adpt_i2o_status_get(pHba);
3272 return ret;
3273}
3274
3275
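/*
 * Send the system table built by adpt_i2o_build_sys_table() to the
 * IOP with I2O_CMD_SYS_TAB_SET.
 */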
3276static int adpt_i2o_systab_send(adpt_hba* pHba)
3277{
3278 u32 msg[12];
3279 int ret;
3280
3281 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3282 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3283 msg[2] = 0;
3284 msg[3] = 0;
3285 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3286 msg[5] = 0; /* Segment 0 */
3287
3288 /*
3289 * Provide three SGL-elements:
3290 * System table (SysTab), Private memory space declaration and
3291 * Private i/o space declaration
3292 */
3293 msg[6] = 0x54000000 | sys_tbl_len;
3294 msg[7] = virt_to_phys(sys_tbl);
3295 msg[8] = 0x54000000 | 0;
3296 msg[9] = 0;
3297 msg[10] = 0xD4000000 | 0;
3298 msg[11] = 0;
3299
3300 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3301 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3302 pHba->name, ret);
3303 }
3304#ifdef DEBUG
3305 else {
3306 PINFO("%s: SysTab set.\n", pHba->name);
3307 }
3308#endif
3309
3310 return ret;
3311 }
3312
3313
3314/*============================================================================
3315 *
3316 *============================================================================
3317 */
3318
3319
3320#ifdef UARTDELAY
3321
3322static void adpt_delay(int millisec)
3323{
3324 int i;
3325 for (i = 0; i < millisec; i++) {
3326 udelay(1000); /* delay for one millisecond */
3327 }
3328}
3329
3330#endif
3331
3332static struct scsi_host_template driver_template = {
3333 .name = "dpt_i2o",
3334 .proc_name = "dpt_i2o",
3335 .proc_info = adpt_proc_info,
3336 .detect = adpt_detect,
3337 .release = adpt_release,
3338 .info = adpt_info,
3339 .queuecommand = adpt_queue,
3340 .eh_abort_handler = adpt_abort,
3341 .eh_device_reset_handler = adpt_device_reset,
3342 .eh_bus_reset_handler = adpt_bus_reset,
3343 .eh_host_reset_handler = adpt_reset,
3344 .bios_param = adpt_bios_param,
3345 .slave_configure = adpt_slave_configure,
3346 .can_queue = MAX_TO_IOP_MESSAGES,
3347 .this_id = 7,
3348 .cmd_per_lun = 1,
3349 .use_clustering = ENABLE_CLUSTERING,
3350};
3351#include "scsi_module.c"
3352MODULE_LICENSE("GPL");