 1/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
 37#include <linux/module.h>
38
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42////////////////////////////////////////////////////////////////
43
44#include <linux/ioctl.h> /* For SCSI-Passthrough */
45#include <asm/uaccess.h>
46
47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */
 49#include <linux/pci.h> /* for PCI support */
50#include <linux/proc_fs.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h> /* for udelay */
53#include <linux/interrupt.h>
54#include <linux/kernel.h> /* for printk */
55#include <linux/sched.h>
56#include <linux/reboot.h>
57#include <linux/spinlock.h>
58#include <linux/smp_lock.h>
 59#include <linux/dma-mapping.h>
 60
61#include <linux/timer.h>
62#include <linux/string.h>
63#include <linux/ioport.h>
 64#include <linux/mutex.h>
 65
66#include <asm/processor.h> /* for boot_cpu_data */
67#include <asm/pgtable.h>
68#include <asm/io.h> /* for virt_to_bus, etc. */
69
70#include <scsi/scsi.h>
71#include <scsi/scsi_cmnd.h>
72#include <scsi/scsi_device.h>
73#include <scsi/scsi_host.h>
74#include <scsi/scsi_tcq.h>
75
76#include "dpt/dptsig.h"
77#include "dpti.h"
78
79/*============================================================================
80 * Create a binary signature - this is read by dptsig
81 * Needed for our management apps
82 *============================================================================
83 */
84static dpt_sig_S DPTI_sig = {
85 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
86#ifdef __i386__
87 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
88#elif defined(__ia64__)
89 PROC_INTEL, PROC_IA64,
90#elif defined(__sparc__)
91 PROC_ULTRASPARC, PROC_ULTRASPARC,
92#elif defined(__alpha__)
93 PROC_ALPHA, PROC_ALPHA,
94#else
95 (-1),(-1),
96#endif
97 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
98 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
99 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
100};
101
102
103
104
105/*============================================================================
106 * Globals
107 *============================================================================
108 */
109
 110static DEFINE_MUTEX(adpt_configuration_lock);
 111
112static struct i2o_sys_tbl *sys_tbl = NULL;
113static int sys_tbl_ind = 0;
114static int sys_tbl_len = 0;
115
 116static adpt_hba* hba_chain = NULL;
117static int hba_count = 0;
118
 119static const struct file_operations adpt_fops = {
 120 .ioctl = adpt_ioctl,
121 .open = adpt_open,
122 .release = adpt_close
123};
124
125#ifdef REBOOT_NOTIFIER
126static struct notifier_block adpt_reboot_notifier =
127{
128 adpt_reboot_event,
129 NULL,
130 0
131};
132#endif
133
134/* Structures and definitions for synchronous message posting.
135 * See adpt_i2o_post_wait() for description
136 * */
137struct adpt_i2o_post_wait_data
138{
139 int status;
140 u32 id;
141 adpt_wait_queue_head_t *wq;
142 struct adpt_i2o_post_wait_data *next;
143};
144
145static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
146static u32 adpt_post_wait_id = 0;
147static DEFINE_SPINLOCK(adpt_post_wait_lock);
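/* Overview of the synchronous post-wait path used throughout this driver:
 * adpt_i2o_post_wait() links an adpt_i2o_post_wait_data entry onto
 * adpt_post_wait_queue, tags the message context with a 15-bit id, posts
 * the frame and sleeps on a wait queue. When the reply arrives, the
 * interrupt path calls adpt_i2o_post_wait_complete(), which matches the id
 * here and wakes the waiter with the completion status. */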
148
149
150/*============================================================================
151 * Functions
152 *============================================================================
153 */
154
155static u8 adpt_read_blink_led(adpt_hba* host)
156{
157 if(host->FwDebugBLEDflag_P != 0) {
158 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
159 return readb(host->FwDebugBLEDvalue_P);
160 }
161 }
162 return 0;
163}
164
165/*============================================================================
166 * Scsi host template interface functions
167 *============================================================================
168 */
169
170static struct pci_device_id dptids[] = {
171 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
172 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
173 { 0, }
174};
175MODULE_DEVICE_TABLE(pci,dptids);
176
177static int adpt_detect(struct scsi_host_template* sht)
178{
179 struct pci_dev *pDev = NULL;
180 adpt_hba* pHba;
181
182 adpt_init();
183
184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
185
 186 /* search for all Adaptec I2O RAID cards */
 187 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 188 if(pDev->device == PCI_DPT_DEVICE_ID ||
189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
190 if(adpt_install_hba(sht, pDev) ){
191 PERROR("Could not Init an I2O RAID device\n");
192 PERROR("Will not try to detect others.\n");
193 return hba_count-1;
194 }
 195 pci_dev_get(pDev);
 196 }
197 }
198
199 /* In INIT state, Activate IOPs */
200 for (pHba = hba_chain; pHba; pHba = pHba->next) {
201 // Activate does get status , init outbound, and get hrt
202 if (adpt_i2o_activate_hba(pHba) < 0) {
203 adpt_i2o_delete_hba(pHba);
204 }
205 }
206
207
208 /* Active IOPs in HOLD state */
209
210rebuild_sys_tab:
211 if (hba_chain == NULL)
212 return 0;
213
214 /*
215 * If build_sys_table fails, we kill everything and bail
216 * as we can't init the IOPs w/o a system table
217 */
218 if (adpt_i2o_build_sys_table() < 0) {
219 adpt_i2o_sys_shutdown();
220 return 0;
221 }
222
223 PDEBUG("HBA's in HOLD state\n");
224
 225 /* If an IOP doesn't come online, we need to rebuild the system table */
226 for (pHba = hba_chain; pHba; pHba = pHba->next) {
227 if (adpt_i2o_online_hba(pHba) < 0) {
228 adpt_i2o_delete_hba(pHba);
229 goto rebuild_sys_tab;
230 }
231 }
232
233 /* Active IOPs now in OPERATIONAL state */
234 PDEBUG("HBA's in OPERATIONAL state\n");
235
236 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
237 for (pHba = hba_chain; pHba; pHba = pHba->next) {
238 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
239 if (adpt_i2o_lct_get(pHba) < 0){
240 adpt_i2o_delete_hba(pHba);
241 continue;
242 }
243
244 if (adpt_i2o_parse_lct(pHba) < 0){
245 adpt_i2o_delete_hba(pHba);
246 continue;
247 }
248 adpt_inquiry(pHba);
249 }
250
251 for (pHba = hba_chain; pHba; pHba = pHba->next) {
252 if( adpt_scsi_register(pHba,sht) < 0){
253 adpt_i2o_delete_hba(pHba);
254 continue;
255 }
256 pHba->initialized = TRUE;
257 pHba->state &= ~DPTI_STATE_RESET;
258 }
259
260 // Register our control device node
261 // nodes will need to be created in /dev to access this
262 // the nodes can not be created from within the driver
263 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
264 adpt_i2o_sys_shutdown();
265 return 0;
266 }
267 return hba_count;
268}
269
270
271/*
272 * scsi_unregister will be called AFTER we return.
273 */
274static int adpt_release(struct Scsi_Host *host)
275{
276 adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
277// adpt_i2o_quiesce_hba(pHba);
278 adpt_i2o_delete_hba(pHba);
279 scsi_unregister(host);
280 return 0;
281}
282
283
284static void adpt_inquiry(adpt_hba* pHba)
285{
286 u32 msg[14];
287 u32 *mptr;
288 u32 *lenptr;
289 int direction;
290 int scsidir;
291 u32 len;
292 u32 reqlen;
293 u8* buf;
294 u8 scb[16];
295 s32 rcode;
296
297 memset(msg, 0, sizeof(msg));
 298 buf = kmalloc(80,GFP_KERNEL|ADDR32);
 299 if(!buf){
300 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
301 return;
302 }
303 memset((void*)buf, 0, 36);
304
305 len = 36;
306 direction = 0x00000000;
307 scsidir =0x40000000; // DATA IN (iop<--dev)
308
309 reqlen = 14; // SINGLE SGE
310 /* Stick the headers on */
311 msg[0] = reqlen<<16 | SGL_OFFSET_12;
312 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
313 msg[2] = 0;
314 msg[3] = 0;
315 // Adaptec/DPT Private stuff
316 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
317 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
318 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
319 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
320 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
321 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
322 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
323
324 mptr=msg+7;
325
326 memset(scb, 0, sizeof(scb));
327 // Write SCSI command into the message - always 16 byte block
328 scb[0] = INQUIRY;
329 scb[1] = 0;
330 scb[2] = 0;
331 scb[3] = 0;
332 scb[4] = 36;
333 scb[5] = 0;
334 // Don't care about the rest of scb
335
336 memcpy(mptr, scb, sizeof(scb));
337 mptr+=4;
338 lenptr=mptr++; /* Remember me - fill in when we know */
339
340 /* Now fill in the SGList and command */
341 *lenptr = len;
342 *mptr++ = 0xD0000000|direction|len;
343 *mptr++ = virt_to_bus(buf);
344
 345 // Send it on its way
346 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
347 if (rcode != 0) {
348 sprintf(pHba->detail, "Adaptec I2O RAID");
349 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
350 if (rcode != -ETIME && rcode != -EINTR)
351 kfree(buf);
352 } else {
353 memset(pHba->detail, 0, sizeof(pHba->detail));
354 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
355 memcpy(&(pHba->detail[16]), " Model: ", 8);
356 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
357 memcpy(&(pHba->detail[40]), " FW: ", 4);
358 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
359 pHba->detail[48] = '\0'; /* precautionary */
360 kfree(buf);
361 }
362 adpt_i2o_status_get(pHba);
363 return ;
364}
365
366
367static int adpt_slave_configure(struct scsi_device * device)
368{
369 struct Scsi_Host *host = device->host;
370 adpt_hba* pHba;
371
372 pHba = (adpt_hba *) host->hostdata[0];
373
374 if (host->can_queue && device->tagged_supported) {
375 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
376 host->can_queue - 1);
377 } else {
378 scsi_adjust_queue_depth(device, 0, 1);
379 }
380 return 0;
381}
382
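/* Queuecommand entry point: validate the HBA, look up (channel, id, lun) in
 * the per-HBA device table on first use and cache it in cmd->device->hostdata,
 * then hand the command to adpt_scsi_to_i2o() for translation into an I2O
 * message frame. */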
383static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
384{
385 adpt_hba* pHba = NULL;
386 struct adpt_device* pDev = NULL; /* dpt per device information */
 387
388 cmd->scsi_done = done;
389 /*
390 * SCSI REQUEST_SENSE commands will be executed automatically by the
391 * Host Adapter for any errors, so they should not be executed
392 * explicitly unless the Sense Data is zero indicating that no error
393 * occurred.
394 */
395
396 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
397 cmd->result = (DID_OK << 16);
398 cmd->scsi_done(cmd);
399 return 0;
400 }
401
402 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
403 if (!pHba) {
404 return FAILED;
405 }
406
407 rmb();
408 /*
409 * TODO: I need to block here if I am processing ioctl cmds
410 * but if the outstanding cmds all finish before the ioctl,
411 * the scsi-core will not know to start sending cmds to me again.
 412 * I need a way to restart the scsi-core queues, or I should block
 413 * calling scsi_done on the outstanding cmds instead.
 414 * For now we don't set the IOCTL state.
415 */
416 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
417 pHba->host->last_reset = jiffies;
418 pHba->host->resetting = 1;
419 return 1;
420 }
421
 422 // TODO if the cmd->device is offline then I may need to issue a bus rescan
423 // followed by a get_lct to see if the device is there anymore
424 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
425 /*
426 * First command request for this device. Set up a pointer
427 * to the device structure. This should be a TEST_UNIT_READY
428 * command from scan_scsis_single.
429 */
430 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
431 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
432 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
433 cmd->result = (DID_NO_CONNECT << 16);
434 cmd->scsi_done(cmd);
435 return 0;
436 }
437 cmd->device->hostdata = pDev;
438 }
439 pDev->pScsi_dev = cmd->device;
440
441 /*
442 * If we are being called from when the device is being reset,
443 * delay processing of the command until later.
444 */
445 if (pDev->state & DPTI_DEV_RESET ) {
446 return FAILED;
447 }
448 return adpt_scsi_to_i2o(pHba, cmd, pDev);
449}
450
451static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
452 sector_t capacity, int geom[])
453{
454 int heads=-1;
455 int sectors=-1;
456 int cylinders=-1;
457
458 // *** First lets set the default geometry ****
459
 460 // If the capacity is less than 0x2000
461 if (capacity < 0x2000 ) { // floppy
462 heads = 18;
463 sectors = 2;
464 }
465 // else if between 0x2000 and 0x20000
466 else if (capacity < 0x20000) {
467 heads = 64;
468 sectors = 32;
469 }
470 // else if between 0x20000 and 0x40000
471 else if (capacity < 0x40000) {
472 heads = 65;
473 sectors = 63;
474 }
 475 // else if between 0x40000 and 0x80000
476 else if (capacity < 0x80000) {
477 heads = 128;
478 sectors = 63;
479 }
480 // else if greater than 0x80000
481 else {
482 heads = 255;
483 sectors = 63;
484 }
485 cylinders = sector_div(capacity, heads * sectors);
486
487 // Special case if CDROM
488 if(sdev->type == 5) { // CDROM
489 heads = 252;
490 sectors = 63;
491 cylinders = 1111;
492 }
493
494 geom[0] = heads;
495 geom[1] = sectors;
496 geom[2] = cylinders;
497
498 PDEBUG("adpt_bios_param: exit\n");
499 return 0;
500}
501
502
503static const char *adpt_info(struct Scsi_Host *host)
504{
505 adpt_hba* pHba;
506
507 pHba = (adpt_hba *) host->hostdata[0];
508 return (char *) (pHba->detail);
509}
510
511static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
512 int length, int inout)
513{
514 struct adpt_device* d;
515 int id;
516 int chan;
517 int len = 0;
518 int begin = 0;
519 int pos = 0;
520 adpt_hba* pHba;
521 int unit;
522
523 *start = buffer;
524 if (inout == TRUE) {
525 /*
526 * The user has done a write and wants us to take the
527 * data in the buffer and do something with it.
528 * proc_scsiwrite calls us with inout = 1
529 *
530 * Read data from buffer (writing to us) - NOT SUPPORTED
531 */
532 return -EINVAL;
533 }
534
535 /*
536 * inout = 0 means the user has done a read and wants information
537 * returned, so we write information about the cards into the buffer
538 * proc_scsiread() calls us with inout = 0
539 */
540
541 // Find HBA (host bus adapter) we are looking for
 542 mutex_lock(&adpt_configuration_lock);
 543 for (pHba = hba_chain; pHba; pHba = pHba->next) {
544 if (pHba->host == host) {
545 break; /* found adapter */
546 }
547 }
 548 mutex_unlock(&adpt_configuration_lock);
 549 if (pHba == NULL) {
550 return 0;
551 }
552 host = pHba->host;
553
554 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
555 len += sprintf(buffer+len, "%s\n", pHba->detail);
556 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
557 pHba->host->host_no, pHba->name, host->irq);
558 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
559 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
560
561 pos = begin + len;
562
563 /* CHECKPOINT */
564 if(pos > offset + length) {
565 goto stop_output;
566 }
567 if(pos <= offset) {
568 /*
569 * If we haven't even written to where we last left
570 * off (the last time we were called), reset the
571 * beginning pointer.
572 */
573 len = 0;
574 begin = pos;
575 }
576 len += sprintf(buffer+len, "Devices:\n");
577 for(chan = 0; chan < MAX_CHANNEL; chan++) {
578 for(id = 0; id < MAX_ID; id++) {
579 d = pHba->channel[chan].device[id];
580 while(d){
581 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
582 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
583 pos = begin + len;
584
585
586 /* CHECKPOINT */
587 if(pos > offset + length) {
588 goto stop_output;
589 }
590 if(pos <= offset) {
591 len = 0;
592 begin = pos;
593 }
594
595 unit = d->pI2o_dev->lct_data.tid;
596 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
597 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
598 scsi_device_online(d->pScsi_dev)? "online":"offline");
599 pos = begin + len;
600
601 /* CHECKPOINT */
602 if(pos > offset + length) {
603 goto stop_output;
604 }
605 if(pos <= offset) {
606 len = 0;
607 begin = pos;
608 }
609
610 d = d->next_lun;
611 }
612 }
613 }
614
615 /*
616 * begin is where we last checked our position with regards to offset
617 * begin is always less than offset. len is relative to begin. It
618 * is the number of bytes written past begin
619 *
620 */
621stop_output:
622 /* stop the output and calculate the correct length */
623 *(buffer + len) = '\0';
624
625 *start = buffer + (offset - begin); /* Start of wanted data */
626 len -= (offset - begin);
627 if(len > length) {
628 len = length;
629 } else if(len < 0){
630 len = 0;
631 **start = '\0';
632 }
633 return len;
634}
635
636
637/*===========================================================================
638 * Error Handling routines
639 *===========================================================================
640 */
641
642static int adpt_abort(struct scsi_cmnd * cmd)
643{
644 adpt_hba* pHba = NULL; /* host bus adapter structure */
645 struct adpt_device* dptdevice; /* dpt per device information */
646 u32 msg[5];
647 int rcode;
648
649 if(cmd->serial_number == 0){
650 return FAILED;
651 }
652 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
653 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
654 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
655 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
656 return FAILED;
657 }
658
659 memset(msg, 0, sizeof(msg));
660 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
661 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
662 msg[2] = 0;
663 msg[3]= 0;
664 msg[4] = (u32)cmd;
 665 if (pHba->host)
666 spin_lock_irq(pHba->host->host_lock);
667 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
668 if (pHba->host)
669 spin_unlock_irq(pHba->host->host_lock);
670 if (rcode != 0) {
 671 if(rcode == -EOPNOTSUPP ){
672 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
673 return FAILED;
674 }
675 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
676 return FAILED;
677 }
678 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
679 return SUCCESS;
680}
681
682
683#define I2O_DEVICE_RESET 0x27
684// This is the same for BLK and SCSI devices
685// NOTE this is wrong in the i2o.h definitions
686// This is not currently supported by our adapter but we issue it anyway
687static int adpt_device_reset(struct scsi_cmnd* cmd)
688{
689 adpt_hba* pHba;
690 u32 msg[4];
691 u32 rcode;
692 int old_state;
 693 struct adpt_device* d = cmd->device->hostdata;
 694
695 pHba = (void*) cmd->device->host->hostdata[0];
696 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
697 if (!d) {
698 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
699 return FAILED;
700 }
701 memset(msg, 0, sizeof(msg));
702 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
703 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
704 msg[2] = 0;
705 msg[3] = 0;
706
 707 if (pHba->host)
708 spin_lock_irq(pHba->host->host_lock);
 709 old_state = d->state;
710 d->state |= DPTI_DEV_RESET;
 711 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
712 d->state = old_state;
713 if (pHba->host)
714 spin_unlock_irq(pHba->host->host_lock);
715 if (rcode != 0) {
 716 if(rcode == -EOPNOTSUPP ){
717 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
718 return FAILED;
719 }
720 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
721 return FAILED;
722 } else {
 723 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
724 return SUCCESS;
725 }
726}
727
728
729#define I2O_HBA_BUS_RESET 0x87
730// This version of bus reset is called by the eh_error handler
731static int adpt_bus_reset(struct scsi_cmnd* cmd)
732{
733 adpt_hba* pHba;
734 u32 msg[4];
 735 u32 rcode;
 736
737 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
738 memset(msg, 0, sizeof(msg));
739 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
740 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
741 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
742 msg[2] = 0;
743 msg[3] = 0;
 744 if (pHba->host)
745 spin_lock_irq(pHba->host->host_lock);
746 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
747 if (pHba->host)
748 spin_unlock_irq(pHba->host->host_lock);
749 if (rcode != 0) {
 750 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
751 return FAILED;
752 } else {
753 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
754 return SUCCESS;
755 }
756}
757
758// This version of reset is called by the eh_error_handler
 759static int __adpt_reset(struct scsi_cmnd* cmd)
 760{
761 adpt_hba* pHba;
762 int rcode;
763 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
764 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
765 rcode = adpt_hba_reset(pHba);
766 if(rcode == 0){
767 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
768 return SUCCESS;
769 } else {
770 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
771 return FAILED;
772 }
773}
774
 775static int adpt_reset(struct scsi_cmnd* cmd)
776{
777 int rc;
778
779 spin_lock_irq(cmd->device->host->host_lock);
780 rc = __adpt_reset(cmd);
781 spin_unlock_irq(cmd->device->host->host_lock);
782
783 return rc;
784}
785
 786// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
787static int adpt_hba_reset(adpt_hba* pHba)
788{
789 int rcode;
790
791 pHba->state |= DPTI_STATE_RESET;
792
793 // Activate does get status , init outbound, and get hrt
794 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
795 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
796 adpt_i2o_delete_hba(pHba);
797 return rcode;
798 }
799
800 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
801 adpt_i2o_delete_hba(pHba);
802 return rcode;
803 }
804 PDEBUG("%s: in HOLD state\n",pHba->name);
805
806 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
807 adpt_i2o_delete_hba(pHba);
808 return rcode;
809 }
810 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
811
812 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
813 adpt_i2o_delete_hba(pHba);
814 return rcode;
815 }
816
817 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
818 adpt_i2o_delete_hba(pHba);
819 return rcode;
820 }
821 pHba->state &= ~DPTI_STATE_RESET;
822
823 adpt_fail_posted_scbs(pHba);
824 return 0; /* return success */
825}
826
827/*===========================================================================
828 *
829 *===========================================================================
830 */
831
832
833static void adpt_i2o_sys_shutdown(void)
834{
835 adpt_hba *pHba, *pNext;
 836 struct adpt_i2o_post_wait_data *p1, *old;
 837
838 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
839 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
840 /* Delete all IOPs from the controller chain */
841 /* They should have already been released by the
842 * scsi-core
843 */
844 for (pHba = hba_chain; pHba; pHba = pNext) {
845 pNext = pHba->next;
846 adpt_i2o_delete_hba(pHba);
847 }
848
849 /* Remove any timedout entries from the wait queue. */
 850// spin_lock_irqsave(&adpt_post_wait_lock, flags);
851 /* Nothing should be outstanding at this point so just
852 * free them
853 */
 854 for(p1 = adpt_post_wait_queue; p1;) {
855 old = p1;
856 p1 = p1->next;
857 kfree(old);
 858 }
859// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
860 adpt_post_wait_queue = NULL;
861
862 printk(KERN_INFO "Adaptec I2O controllers down.\n");
863}
864
865/*
866 * reboot/shutdown notification.
867 *
868 * - Quiesce each IOP in the system
869 *
870 */
871
872#ifdef REBOOT_NOTIFIER
873static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
874{
875
876 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
877 return NOTIFY_DONE;
878
879 adpt_i2o_sys_shutdown();
880
881 return NOTIFY_DONE;
882}
883#endif
884
885
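/* One-time controller bring-up: enable the PCI device, claim and ioremap
 * BAR0 (split-BAR Raptor cards also map BAR1 for the message unit), allocate
 * an adpt_hba, chain it onto hba_chain under adpt_configuration_lock, and
 * finally hook the shared interrupt line. */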
886static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
887{
888
889 adpt_hba* pHba = NULL;
890 adpt_hba* p = NULL;
891 ulong base_addr0_phys = 0;
892 ulong base_addr1_phys = 0;
893 u32 hba_map0_area_size = 0;
894 u32 hba_map1_area_size = 0;
895 void __iomem *base_addr_virt = NULL;
896 void __iomem *msg_addr_virt = NULL;
897
898 int raptorFlag = FALSE;
 899
900 if(pci_enable_device(pDev)) {
901 return -EINVAL;
902 }
 903
904 if (pci_request_regions(pDev, "dpt_i2o")) {
905 PERROR("dpti: adpt_config_hba: pci request region failed\n");
906 return -EINVAL;
907 }
908
 909 pci_set_master(pDev);
 910 if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
911 pci_set_dma_mask(pDev, DMA_32BIT_MASK))
 912 return -EINVAL;
913
914 base_addr0_phys = pci_resource_start(pDev,0);
915 hba_map0_area_size = pci_resource_len(pDev,0);
916
917 // Check if standard PCI card or single BAR Raptor
918 if(pDev->device == PCI_DPT_DEVICE_ID){
919 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
920 // Raptor card with this device id needs 4M
921 hba_map0_area_size = 0x400000;
922 } else { // Not Raptor - it is a PCI card
923 if(hba_map0_area_size > 0x100000 ){
924 hba_map0_area_size = 0x100000;
925 }
926 }
927 } else {// Raptor split BAR config
928 // Use BAR1 in this configuration
929 base_addr1_phys = pci_resource_start(pDev,1);
930 hba_map1_area_size = pci_resource_len(pDev,1);
931 raptorFlag = TRUE;
932 }
933
 934 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
935 if (!base_addr_virt) {
 936 pci_release_regions(pDev);
 937 PERROR("dpti: adpt_config_hba: io remap failed\n");
938 return -EINVAL;
939 }
940
941 if(raptorFlag == TRUE) {
942 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
943 if (!msg_addr_virt) {
944 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
945 iounmap(base_addr_virt);
 946 pci_release_regions(pDev);
 947 return -EINVAL;
948 }
949 } else {
950 msg_addr_virt = base_addr_virt;
951 }
952
953 // Allocate and zero the data structure
954 pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
955 if( pHba == NULL) {
956 if(msg_addr_virt != base_addr_virt){
957 iounmap(msg_addr_virt);
958 }
959 iounmap(base_addr_virt);
 960 pci_release_regions(pDev);
 961 return -ENOMEM;
962 }
963 memset(pHba, 0, sizeof(adpt_hba));
964
 965 mutex_lock(&adpt_configuration_lock);
 966
967 if(hba_chain != NULL){
968 for(p = hba_chain; p->next; p = p->next);
969 p->next = pHba;
970 } else {
971 hba_chain = pHba;
972 }
973 pHba->next = NULL;
974 pHba->unit = hba_count;
 975 sprintf(pHba->name, "dpti%d", hba_count);
 976 hba_count++;
977
 978 mutex_unlock(&adpt_configuration_lock);
 979
980 pHba->pDev = pDev;
981 pHba->base_addr_phys = base_addr0_phys;
982
983 // Set up the Virtual Base Address of the I2O Device
984 pHba->base_addr_virt = base_addr_virt;
985 pHba->msg_addr_virt = msg_addr_virt;
986 pHba->irq_mask = base_addr_virt+0x30;
987 pHba->post_port = base_addr_virt+0x40;
988 pHba->reply_port = base_addr_virt+0x44;
989
990 pHba->hrt = NULL;
991 pHba->lct = NULL;
992 pHba->lct_size = 0;
993 pHba->status_block = NULL;
994 pHba->post_count = 0;
995 pHba->state = DPTI_STATE_RESET;
996 pHba->pDev = pDev;
997 pHba->devices = NULL;
998
999 // Initializing the spinlocks
1000 spin_lock_init(&pHba->state_lock);
1001 spin_lock_init(&adpt_post_wait_lock);
1002
1003 if(raptorFlag == 0){
1004 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1005 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1006 } else {
1007 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1008 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1009 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1010 }
1011
 1012 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
 1013 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1014 adpt_i2o_delete_hba(pHba);
1015 return -EINVAL;
1016 }
1017
1018 return 0;
1019}
1020
1021
1022static void adpt_i2o_delete_hba(adpt_hba* pHba)
1023{
1024 adpt_hba* p1;
1025 adpt_hba* p2;
1026 struct i2o_device* d;
1027 struct i2o_device* next;
1028 int i;
1029 int j;
1030 struct adpt_device* pDev;
1031 struct adpt_device* pNext;
1032
1033
 1034 mutex_lock(&adpt_configuration_lock);
 1035 // scsi_unregister calls our adpt_release which
 1036 // does a quiesce
1037 if(pHba->host){
1038 free_irq(pHba->host->irq, pHba);
1039 }
 1040 p2 = NULL;
1041 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1042 if(p1 == pHba) {
1043 if(p2) {
1044 p2->next = p1->next;
1045 } else {
1046 hba_chain = p1->next;
1047 }
1048 break;
1049 }
1050 }
1051
1052 hba_count--;
 1053 mutex_unlock(&adpt_configuration_lock);
 1054
1055 iounmap(pHba->base_addr_virt);
 1056 pci_release_regions(pHba->pDev);
 1057 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1058 iounmap(pHba->msg_addr_virt);
1059 }
 1060 kfree(pHba->hrt);
1061 kfree(pHba->lct);
1062 kfree(pHba->status_block);
1063 kfree(pHba->reply_pool);
 1064
1065 for(d = pHba->devices; d ; d = next){
1066 next = d->next;
1067 kfree(d);
1068 }
1069 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1070 for(j = 0; j < MAX_ID; j++){
1071 if(pHba->channel[i].device[j] != NULL){
1072 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1073 pNext = pDev->next_lun;
1074 kfree(pDev);
1075 }
1076 }
1077 }
1078 }
 1079 pci_dev_put(pHba->pDev);
 1080 kfree(pHba);
1081
1082 if(hba_count <= 0){
1083 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1084 }
1085}
1086
1087
1088static int adpt_init(void)
1089{
 1090 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
 1091#ifdef REBOOT_NOTIFIER
1092 register_reboot_notifier(&adpt_reboot_notifier);
1093#endif
1094
1095 return 0;
1096}
1097
1098
1099static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1100{
1101 struct adpt_device* d;
1102
1103 if(chan < 0 || chan >= MAX_CHANNEL)
1104 return NULL;
1105
1106 if( pHba->channel[chan].device == NULL){
1107 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1108 return NULL;
1109 }
1110
1111 d = pHba->channel[chan].device[id];
1112 if(!d || d->tid == 0) {
1113 return NULL;
1114 }
1115
1116 /* If it is the only lun at that address then this should match*/
1117 if(d->scsi_lun == lun){
1118 return d;
1119 }
1120
1121 /* else we need to look through all the luns */
1122 for(d=d->next_lun ; d ; d = d->next_lun){
1123 if(d->scsi_lun == lun){
1124 return d;
1125 }
1126 }
1127 return NULL;
1128}
1129
1130
1131static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1132{
1133 // I used my own version of the WAIT_QUEUE_HEAD
1134 // to handle some version differences
1135 // When embedded in the kernel this could go back to the vanilla one
1136 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1137 int status = 0;
1138 ulong flags = 0;
1139 struct adpt_i2o_post_wait_data *p1, *p2;
1140 struct adpt_i2o_post_wait_data *wait_data =
1141 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
 1142 DECLARE_WAITQUEUE(wait, current);
 1143
 1144 if (!wait_data)
 1145 return -ENOMEM;
 1146
 1147 /*
1148 * The spin locking is needed to keep anyone from playing
1149 * with the queue pointers and id while we do the same
1150 */
1151 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1152 // TODO we need a MORE unique way of getting ids
1153 // to support async LCT get
1154 wait_data->next = adpt_post_wait_queue;
1155 adpt_post_wait_queue = wait_data;
1156 adpt_post_wait_id++;
1157 adpt_post_wait_id &= 0x7fff;
1158 wait_data->id = adpt_post_wait_id;
1159 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1160
1161 wait_data->wq = &adpt_wq_i2o_post;
1162 wait_data->status = -ETIMEDOUT;
1163
 1164 add_wait_queue(&adpt_wq_i2o_post, &wait);
 1165
1166 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1167 timeout *= HZ;
1168 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1169 set_current_state(TASK_INTERRUPTIBLE);
1170 if(pHba->host)
1171 spin_unlock_irq(pHba->host->host_lock);
1172 if (!timeout)
1173 schedule();
1174 else{
1175 timeout = schedule_timeout(timeout);
1176 if (timeout == 0) {
1177 // I/O issued, but cannot get result in
 1178 // specified time. Freeing resources is
1179 // dangerous.
1180 status = -ETIME;
1181 }
1182 }
1183 if(pHba->host)
1184 spin_lock_irq(pHba->host->host_lock);
1185 }
 1186 remove_wait_queue(&adpt_wq_i2o_post, &wait);
 1187
1188 if(status == -ETIMEDOUT){
1189 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1190 // We will have to free the wait_data memory during shutdown
1191 return status;
1192 }
1193
1194 /* Remove the entry from the queue. */
1195 p2 = NULL;
1196 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1197 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1198 if(p1 == wait_data) {
1199 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1200 status = -EOPNOTSUPP;
1201 }
1202 if(p2) {
1203 p2->next = p1->next;
1204 } else {
1205 adpt_post_wait_queue = p1->next;
1206 }
1207 break;
1208 }
1209 }
1210 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1211
1212 kfree(wait_data);
1213
1214 return status;
1215}
1216
1217
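/* Low-level message post: poll the inbound (post) FIFO until the IOP hands
 * back a free frame offset, copy the request into that frame with
 * memcpy_toio(), then write the offset back to the post port to pass
 * ownership of the frame to the IOP. */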
1218static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1219{
1220
1221 u32 m = EMPTY_QUEUE;
1222 u32 __iomem *msg;
1223 ulong timeout = jiffies + 30*HZ;
1224 do {
1225 rmb();
1226 m = readl(pHba->post_port);
1227 if (m != EMPTY_QUEUE) {
1228 break;
1229 }
1230 if(time_after(jiffies,timeout)){
1231 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1232 return -ETIMEDOUT;
1233 }
 1234 schedule_timeout_uninterruptible(1);
 1235 } while(m == EMPTY_QUEUE);
1236
1237 msg = pHba->msg_addr_virt + m;
1238 memcpy_toio(msg, data, len);
1239 wmb();
1240
1241 //post message
1242 writel(m, pHba->post_port);
1243 wmb();
1244
1245 return 0;
1246}
1247
1248
1249static void adpt_i2o_post_wait_complete(u32 context, int status)
1250{
1251 struct adpt_i2o_post_wait_data *p1 = NULL;
1252 /*
1253 * We need to search through the adpt_post_wait
1254 * queue to see if the given message is still
1255 * outstanding. If not, it means that the IOP
1256 * took longer to respond to the message than we
1257 * had allowed and timer has already expired.
1258 * Not much we can do about that except log
1259 * it for debug purposes, increase timeout, and recompile
1260 *
1261 * Lock needed to keep anyone from moving queue pointers
1262 * around while we're looking through them.
1263 */
1264
1265 context &= 0x7fff;
1266
1267 spin_lock(&adpt_post_wait_lock);
1268 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1269 if(p1->id == context) {
1270 p1->status = status;
1271 spin_unlock(&adpt_post_wait_lock);
1272 wake_up_interruptible(p1->wq);
1273 return;
1274 }
1275 }
1276 spin_unlock(&adpt_post_wait_lock);
1277 // If this happens we lose commands that probably really completed
1278 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1279 printk(KERN_DEBUG" Tasks in wait queue:\n");
1280 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1281 printk(KERN_DEBUG" %d\n",p1->id);
1282 }
1283 return;
1284}
1285
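/* IOP reset handshake: obtain a free message frame, send an ADAPTER_RESET
 * request with a DMA-visible status byte, poll that byte until the IOP
 * signals progress or completion, then flush the spare frame with a NOP
 * before re-reading the status block. */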
1286static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1287{
1288 u32 msg[8];
1289 u8* status;
1290 u32 m = EMPTY_QUEUE ;
1291 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1292
1293 if(pHba->initialized == FALSE) { // First time reset should be quick
1294 timeout = jiffies + (25*HZ);
1295 } else {
1296 adpt_i2o_quiesce_hba(pHba);
1297 }
1298
1299 do {
1300 rmb();
1301 m = readl(pHba->post_port);
1302 if (m != EMPTY_QUEUE) {
1303 break;
1304 }
1305 if(time_after(jiffies,timeout)){
1306 printk(KERN_WARNING"Timeout waiting for message!\n");
1307 return -ETIMEDOUT;
1308 }
 1309 schedule_timeout_uninterruptible(1);
 1310 } while (m == EMPTY_QUEUE);
1311
 1312 status = kzalloc(4, GFP_KERNEL|ADDR32);
 1313 if(status == NULL) {
1314 adpt_send_nop(pHba, m);
1315 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1316 return -ENOMEM;
1317 }
 1318
1319 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1320 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1321 msg[2]=0;
1322 msg[3]=0;
1323 msg[4]=0;
1324 msg[5]=0;
1325 msg[6]=virt_to_bus(status);
1326 msg[7]=0;
1327
1328 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1329 wmb();
1330 writel(m, pHba->post_port);
1331 wmb();
1332
1333 while(*status == 0){
1334 if(time_after(jiffies,timeout)){
1335 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1336 kfree(status);
1337 return -ETIMEDOUT;
1338 }
1339 rmb();
 1340 schedule_timeout_uninterruptible(1);
 1341 }
1342
1343 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1344 PDEBUG("%s: Reset in progress...\n", pHba->name);
1345 // Here we wait for message frame to become available
1346 // indicated that reset has finished
1347 do {
1348 rmb();
1349 m = readl(pHba->post_port);
1350 if (m != EMPTY_QUEUE) {
1351 break;
1352 }
1353 if(time_after(jiffies,timeout)){
1354 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1355 return -ETIMEDOUT;
1356 }
 1357 schedule_timeout_uninterruptible(1);
 1358 } while (m == EMPTY_QUEUE);
1359 // Flush the offset
1360 adpt_send_nop(pHba, m);
1361 }
1362 adpt_i2o_status_get(pHba);
1363 if(*status == 0x02 ||
1364 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1365 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1366 pHba->name);
1367 } else {
1368 PDEBUG("%s: Reset completed.\n", pHba->name);
1369 }
1370
1371 kfree(status);
1372#ifdef UARTDELAY
1373 // This delay is to allow someone attached to the card through the debug UART to
1374 // set up the dump levels that they want before the rest of the initialization sequence
1375 adpt_delay(20000);
1376#endif
1377 return 0;
1378}
1379
1380
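/* LCT parsing: each usable table entry is queried (scalar group 0x8000) for
 * its bus/target/lun, bus adapter ports are mapped onto SCSI channels, and
 * per-lun adpt_device entries are built so queued commands can later be
 * routed to the right I2O TID. */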
1381static int adpt_i2o_parse_lct(adpt_hba* pHba)
1382{
1383 int i;
1384 int max;
1385 int tid;
1386 struct i2o_device *d;
1387 i2o_lct *lct = pHba->lct;
1388 u8 bus_no = 0;
1389 s16 scsi_id;
1390 s16 scsi_lun;
1391 u32 buf[10]; // larger than 7, or 8 ...
1392 struct adpt_device* pDev;
1393
1394 if (lct == NULL) {
1395 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1396 return -1;
1397 }
1398
1399 max = lct->table_size;
1400 max -= 3;
1401 max /= 9;
1402
1403 for(i=0;i<max;i++) {
1404 if( lct->lct_entry[i].user_tid != 0xfff){
1405 /*
1406 * If we have hidden devices, we need to inform the upper layers about
1407 * the possible maximum id reference to handle device access when
1408 * an array is disassembled. This code has no other purpose but to
1409 * allow us future access to devices that are currently hidden
1410 * behind arrays, hotspares or have not been configured (JBOD mode).
1411 */
1412 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1413 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1414 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1415 continue;
1416 }
1417 tid = lct->lct_entry[i].tid;
1418 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1419 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1420 continue;
1421 }
1422 bus_no = buf[0]>>16;
1423 scsi_id = buf[1];
1424 scsi_lun = (buf[2]>>8 )&0xff;
1425 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1426 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1427 continue;
1428 }
1429 if (scsi_id >= MAX_ID){
1430 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1431 continue;
1432 }
1433 if(bus_no > pHba->top_scsi_channel){
1434 pHba->top_scsi_channel = bus_no;
1435 }
1436 if(scsi_id > pHba->top_scsi_id){
1437 pHba->top_scsi_id = scsi_id;
1438 }
1439 if(scsi_lun > pHba->top_scsi_lun){
1440 pHba->top_scsi_lun = scsi_lun;
1441 }
1442 continue;
1443 }
 1444 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
 1445 if(d==NULL)
1446 {
1447 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1448 return -ENOMEM;
1449 }
1450
 1451 d->controller = pHba;
 1452 d->next = NULL;
1453
1454 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1455
1456 d->flags = 0;
1457 tid = d->lct_data.tid;
1458 adpt_i2o_report_hba_unit(pHba, d);
1459 adpt_i2o_install_device(pHba, d);
1460 }
1461 bus_no = 0;
1462 for(d = pHba->devices; d ; d = d->next) {
1463 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1464 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1465 tid = d->lct_data.tid;
1466 // TODO get the bus_no from hrt-but for now they are in order
1467 //bus_no =
1468 if(bus_no > pHba->top_scsi_channel){
1469 pHba->top_scsi_channel = bus_no;
1470 }
1471 pHba->channel[bus_no].type = d->lct_data.class_id;
1472 pHba->channel[bus_no].tid = tid;
1473 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1474 {
1475 pHba->channel[bus_no].scsi_id = buf[1];
1476 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1477 }
1478 // TODO remove - this is just until we get from hrt
1479 bus_no++;
1480 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1481 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1482 break;
1483 }
1484 }
1485 }
1486
1487 // Setup adpt_device table
1488 for(d = pHba->devices; d ; d = d->next) {
1489 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1490 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1491 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1492
1493 tid = d->lct_data.tid;
1494 scsi_id = -1;
1495 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1496 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1497 bus_no = buf[0]>>16;
1498 scsi_id = buf[1];
1499 scsi_lun = (buf[2]>>8 )&0xff;
1500 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1501 continue;
1502 }
1503 if (scsi_id >= MAX_ID) {
1504 continue;
1505 }
1506 if( pHba->channel[bus_no].device[scsi_id] == NULL){
 1507 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1508 if(pDev == NULL) {
1509 return -ENOMEM;
1510 }
1511 pHba->channel[bus_no].device[scsi_id] = pDev;
 1512 } else {
1513 for( pDev = pHba->channel[bus_no].device[scsi_id];
1514 pDev->next_lun; pDev = pDev->next_lun){
1515 }
 1516 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1517 if(pDev->next_lun == NULL) {
1518 return -ENOMEM;
1519 }
 1520 pDev = pDev->next_lun;
1521 }
1522 pDev->tid = tid;
1523 pDev->scsi_channel = bus_no;
1524 pDev->scsi_id = scsi_id;
1525 pDev->scsi_lun = scsi_lun;
1526 pDev->pI2o_dev = d;
1527 d->owner = pDev;
1528 pDev->type = (buf[0])&0xff;
1529 pDev->flags = (buf[0]>>8)&0xff;
1530 if(scsi_id > pHba->top_scsi_id){
1531 pHba->top_scsi_id = scsi_id;
1532 }
1533 if(scsi_lun > pHba->top_scsi_lun){
1534 pHba->top_scsi_lun = scsi_lun;
1535 }
1536 }
1537 if(scsi_id == -1){
1538 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1539 d->lct_data.identity_tag);
1540 }
1541 }
1542 }
1543 return 0;
1544}
1545
1546
1547/*
1548 * Each I2O controller has a chain of devices on it - these match
1549 * the useful parts of the LCT of the board.
1550 */
1551
1552static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1553{
 1554 mutex_lock(&adpt_configuration_lock);
 1555 d->controller=pHba;
1556 d->owner=NULL;
1557 d->next=pHba->devices;
1558 d->prev=NULL;
1559 if (pHba->devices != NULL){
1560 pHba->devices->prev=d;
1561 }
1562 pHba->devices=d;
1563 *d->dev_name = 0;
1564
 1565 mutex_unlock(&adpt_configuration_lock);
 1566 return 0;
1567}
1568
1569static int adpt_open(struct inode *inode, struct file *file)
1570{
1571 int minor;
1572 adpt_hba* pHba;
1573
1574 //TODO check for root access
1575 //
1576 minor = iminor(inode);
1577 if (minor >= hba_count) {
1578 return -ENXIO;
1579 }
 1580 mutex_lock(&adpt_configuration_lock);
 1581 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1582 if (pHba->unit == minor) {
1583 break; /* found adapter */
1584 }
1585 }
1586 if (pHba == NULL) {
 1587 mutex_unlock(&adpt_configuration_lock);
 1588 return -ENXIO;
1589 }
1590
1591// if(pHba->in_use){
 1592 // mutex_unlock(&adpt_configuration_lock);
 1593// return -EBUSY;
1594// }
1595
1596 pHba->in_use = 1;
 1597 mutex_unlock(&adpt_configuration_lock);
 1598
1599 return 0;
1600}
1601
1602static int adpt_close(struct inode *inode, struct file *file)
1603{
1604 int minor;
1605 adpt_hba* pHba;
1606
1607 minor = iminor(inode);
1608 if (minor >= hba_count) {
1609 return -ENXIO;
1610 }
 1611 mutex_lock(&adpt_configuration_lock);
 1612 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1613 if (pHba->unit == minor) {
1614 break; /* found adapter */
1615 }
1616 }
 1617 mutex_unlock(&adpt_configuration_lock);
 1618 if (pHba == NULL) {
1619 return -ENXIO;
1620 }
1621
1622 pHba->in_use = 0;
1623
1624 return 0;
1625}
1626
1627
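/* ioctl passthrough: the caller supplies a raw I2O message frame.  Word 0
 * encodes the frame size (upper 16 bits, in 32-bit words) and the SGL offset
 * (bits 4-7); any simple SG elements are bounce-buffered through kernel
 * memory (sg_list[]) before the post and copied back afterwards. */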
1628static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1629{
1630 u32 msg[MAX_MESSAGE_SIZE];
1631 u32* reply = NULL;
1632 u32 size = 0;
1633 u32 reply_size = 0;
1634 u32 __user *user_msg = arg;
1635 u32 __user * user_reply = NULL;
1636 void *sg_list[pHba->sg_tablesize];
1637 u32 sg_offset = 0;
1638 u32 sg_count = 0;
1639 int sg_index = 0;
1640 u32 i = 0;
1641 u32 rcode = 0;
1642 void *p = NULL;
1643 ulong flags = 0;
1644
1645 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1646 // get user msg size in u32s
1647 if(get_user(size, &user_msg[0])){
1648 return -EFAULT;
1649 }
1650 size = size>>16;
1651
1652 user_reply = &user_msg[size];
1653 if(size > MAX_MESSAGE_SIZE){
1654 return -EFAULT;
1655 }
1656 size *= 4; // Convert to bytes
1657
1658 /* Copy in the user's I2O command */
1659 if(copy_from_user(msg, user_msg, size)) {
1660 return -EFAULT;
1661 }
1662 get_user(reply_size, &user_reply[0]);
1663 reply_size = reply_size>>16;
1664 if(reply_size > REPLY_FRAME_SIZE){
1665 reply_size = REPLY_FRAME_SIZE;
1666 }
1667 reply_size *= 4;
 1668 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
 1669 if(reply == NULL) {
1670 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1671 return -ENOMEM;
1672 }
 1673 sg_offset = (msg[0]>>4)&0xf;
1674 msg[2] = 0x40000000; // IOCTL context
1675 msg[3] = (u32)reply;
1676 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1677 if(sg_offset) {
1678 // TODO 64bit fix
1679 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1680 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1681 if (sg_count > pHba->sg_tablesize){
1682 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1683 kfree (reply);
1684 return -EINVAL;
1685 }
1686
1687 for(i = 0; i < sg_count; i++) {
1688 int sg_size;
1689
1690 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1691 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1692 rcode = -EINVAL;
1693 goto cleanup;
1694 }
1695 sg_size = sg[i].flag_count & 0xffffff;
1696 /* Allocate memory for the transfer */
1697 p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1698 if(!p) {
1699 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1700 pHba->name,sg_size,i,sg_count);
1701 rcode = -ENOMEM;
1702 goto cleanup;
1703 }
1704 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1705 /* Copy in the user's SG buffer if necessary */
1706 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1707 // TODO 64bit fix
1708 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1709 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1710 rcode = -EFAULT;
1711 goto cleanup;
1712 }
1713 }
1714 //TODO 64bit fix
1715 sg[i].addr_bus = (u32)virt_to_bus(p);
1716 }
1717 }
1718
1719 do {
1720 if(pHba->host)
1721 spin_lock_irqsave(pHba->host->host_lock, flags);
 1722 // This state stops any new commands from entering the
1723 // controller while processing the ioctl
1724// pHba->state |= DPTI_STATE_IOCTL;
1725// We can't set this now - The scsi subsystem sets host_blocked and
1726// the queue empties and stops. We need a way to restart the queue
1727 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1728 if (rcode != 0)
1729 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1730 rcode, reply);
1731// pHba->state &= ~DPTI_STATE_IOCTL;
1732 if(pHba->host)
1733 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1734 } while(rcode == -ETIMEDOUT);
1735
1736 if(rcode){
1737 goto cleanup;
1738 }
1739
1740 if(sg_offset) {
1741 /* Copy back the Scatter Gather buffers back to user space */
1742 u32 j;
1743 // TODO 64bit fix
1744 struct sg_simple_element* sg;
1745 int sg_size;
1746
1747 // re-acquire the original message to handle correctly the sg copy operation
1748 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1749 // get user msg size in u32s
1750 if(get_user(size, &user_msg[0])){
1751 rcode = -EFAULT;
1752 goto cleanup;
1753 }
1754 size = size>>16;
1755 size *= 4;
1756 /* Copy in the user's I2O command */
1757 if (copy_from_user (msg, user_msg, size)) {
1758 rcode = -EFAULT;
1759 goto cleanup;
1760 }
1761 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1762
1763 // TODO 64bit fix
1764 sg = (struct sg_simple_element*)(msg + sg_offset);
1765 for (j = 0; j < sg_count; j++) {
1766 /* Copy out the SG list to user's buffer if necessary */
1767 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1768 sg_size = sg[j].flag_count & 0xffffff;
1769 // TODO 64bit fix
1770 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1771 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1772 rcode = -EFAULT;
1773 goto cleanup;
1774 }
1775 }
1776 }
1777 }
1778
1779 /* Copy back the reply to user space */
1780 if (reply_size) {
1781 // we wrote our own values for context - now restore the user supplied ones
1782 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1783 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1784 rcode = -EFAULT;
1785 }
1786 if(copy_to_user(user_reply, reply, reply_size)) {
1787 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1788 rcode = -EFAULT;
1789 }
1790 }
1791
1792
1793cleanup:
1794 if (rcode != -ETIME && rcode != -EINTR)
1795 kfree (reply);
1796 while(sg_index) {
1797 if(sg_list[--sg_index]) {
1798 if (rcode != -ETIME && rcode != -EINTR)
1799 kfree(sg_list[sg_index]);
1800 }
1801 }
1802 return rcode;
1803}
1804
1805
1806/*
1807 * This routine returns information about the system. It does not affect
1808 * any driver logic, and if the info is wrong it does not matter.
1809 */
1810
1811/* Get all the info we cannot get from kernel services */
1812static int adpt_system_info(void __user *buffer)
1813{
1814 sysInfo_S si;
1815
1816 memset(&si, 0, sizeof(si));
1817
1818 si.osType = OS_LINUX;
Adrian Bunka4cd16e2005-06-25 14:59:01 -07001819 si.osMajorVersion = 0;
1820 si.osMinorVersion = 0;
1821 si.osRevision = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 si.busType = SI_PCI_BUS;
1823 si.processorFamily = DPTI_sig.dsProcessorFamily;
1824
1825#if defined __i386__
1826 adpt_i386_info(&si);
1827#elif defined (__ia64__)
1828 adpt_ia64_info(&si);
1829#elif defined(__sparc__)
1830 adpt_sparc_info(&si);
1831#elif defined (__alpha__)
1832 adpt_alpha_info(&si);
1833#else
1834 si.processorType = 0xff ;
1835#endif
1836 if(copy_to_user(buffer, &si, sizeof(si))){
1837 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1838 return -EFAULT;
1839 }
1840
1841 return 0;
1842}
1843
1844#if defined __ia64__
1845static void adpt_ia64_info(sysInfo_S* si)
1846{
1847 // This is all the info we need for now
1848 // We will add more info as our new
1849	// management utility requires it
1850 si->processorType = PROC_IA64;
1851}
1852#endif
1853
1854
1855#if defined __sparc__
1856static void adpt_sparc_info(sysInfo_S* si)
1857{
1858 // This is all the info we need for now
1859 // We will add more info as our new
1860	// management utility requires it
1861 si->processorType = PROC_ULTRASPARC;
1862}
1863#endif
1864
1865#if defined __alpha__
1866static void adpt_alpha_info(sysInfo_S* si)
1867{
1868 // This is all the info we need for now
1869 // We will add more info as our new
1870	// management utility requires it
1871 si->processorType = PROC_ALPHA;
1872}
1873#endif
1874
1875#if defined __i386__
1876
1877static void adpt_i386_info(sysInfo_S* si)
1878{
1879 // This is all the info we need for now
1880 // We will add more info as our new
1881	// management utility requires it
1882 switch (boot_cpu_data.x86) {
1883 case CPU_386:
1884 si->processorType = PROC_386;
1885 break;
1886 case CPU_486:
1887 si->processorType = PROC_486;
1888 break;
1889 case CPU_586:
1890 si->processorType = PROC_PENTIUM;
1891 break;
1892 default: // Just in case
1893 si->processorType = PROC_PENTIUM;
1894 break;
1895 }
1896}
1897
1898#endif
1899
1900
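/*
 * Character-device ioctl entry point: look up the adapter by minor
 * number, wait out any reset in progress, then dispatch the DPT/I2O
 * management commands (signature, passthru, controller info, system
 * info, blink LED, reset and rescan).
 */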
1901static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1902 ulong arg)
1903{
1904 int minor;
1905 int error = 0;
1906 adpt_hba* pHba;
1907 ulong flags = 0;
1908 void __user *argp = (void __user *)arg;
1909
1910 minor = iminor(inode);
1911 if (minor >= DPTI_MAX_HBA){
1912 return -ENXIO;
1913 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001914 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1916 if (pHba->unit == minor) {
1917 break; /* found adapter */
1918 }
1919 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001920 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if(pHba == NULL){
1922 return -ENXIO;
1923 }
1924
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001925 while((volatile u32) pHba->state & DPTI_STATE_RESET )
1926 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
1928 switch (cmd) {
1929 // TODO: handle 3 cases
1930 case DPT_SIGNATURE:
1931 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1932 return -EFAULT;
1933 }
1934 break;
1935 case I2OUSRCMD:
1936 return adpt_i2o_passthru(pHba, argp);
1937
1938 case DPT_CTRLINFO:{
1939 drvrHBAinfo_S HbaInfo;
1940
1941#define FLG_OSD_PCI_VALID 0x0001
1942#define FLG_OSD_DMA 0x0002
1943#define FLG_OSD_I2O 0x0004
1944 memset(&HbaInfo, 0, sizeof(HbaInfo));
1945 HbaInfo.drvrHBAnum = pHba->unit;
1946 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1947 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1948 HbaInfo.pciBusNum = pHba->pDev->bus->number;
1949 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1950 HbaInfo.Interrupt = pHba->pDev->irq;
1951 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1952 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1953 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1954 return -EFAULT;
1955 }
1956 break;
1957 }
1958 case DPT_SYSINFO:
1959 return adpt_system_info(argp);
1960 case DPT_BLINKLED:{
1961 u32 value;
1962 value = (u32)adpt_read_blink_led(pHba);
1963 if (copy_to_user(argp, &value, sizeof(value))) {
1964 return -EFAULT;
1965 }
1966 break;
1967 }
1968 case I2ORESETCMD:
1969 if(pHba->host)
1970 spin_lock_irqsave(pHba->host->host_lock, flags);
1971 adpt_hba_reset(pHba);
1972 if(pHba->host)
1973 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1974 break;
1975 case I2ORESCANCMD:
1976 adpt_rescan(pHba);
1977 break;
1978 default:
1979 return -EINVAL;
1980 }
1981
1982 return error;
1983}
1984
1985
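/*
 * Interrupt handler. Drains the reply FIFO while the pending bit is set:
 * failed frames are handed back to the IOP with a NOP, ioctl and
 * post-wait replies are completed through adpt_i2o_post_wait_complete(),
 * and normal SCSI replies are translated by adpt_i2o_to_scsi().
 */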
David Howells7d12e782006-10-05 14:55:46 +01001986static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987{
1988 struct scsi_cmnd* cmd;
1989 adpt_hba* pHba = dev_id;
1990 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001991 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 u32 status=0;
1993 u32 context;
1994 ulong flags = 0;
1995 int handled = 0;
1996
1997 if (pHba == NULL){
1998 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1999 return IRQ_NONE;
2000 }
2001 if(pHba->host)
2002 spin_lock_irqsave(pHba->host->host_lock, flags);
2003
2004 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2005 m = readl(pHba->reply_port);
2006 if(m == EMPTY_QUEUE){
2007 // Try twice then give up
2008 rmb();
2009 m = readl(pHba->reply_port);
2010 if(m == EMPTY_QUEUE){
2011 // This really should not happen
2012 printk(KERN_ERR"dpti: Could not get reply frame\n");
2013 goto out;
2014 }
2015 }
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002016 reply = bus_to_virt(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 if (readl(reply) & MSG_FAIL) {
2019 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002020 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 u32 old_context;
2022 PDEBUG("%s: Failed message\n",pHba->name);
2023 if(old_m >= 0x100000){
2024 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2025 writel(m,pHba->reply_port);
2026 continue;
2027 }
2028 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002029 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 old_context = readl(msg+12);
2031 writel(old_context, reply+12);
2032 adpt_send_nop(pHba, old_m);
2033 }
2034 context = readl(reply+8);
2035 if(context & 0x40000000){ // IOCTL
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002036 void *p = (void *)readl(reply+12);
2037 if( p != NULL) {
2038 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 }
2040 // All IOCTLs will also be post wait
2041 }
2042 if(context & 0x80000000){ // Post wait message
2043 status = readl(reply+16);
2044 if(status >> 24){
2045 status &= 0xffff; /* Get detail status */
2046 } else {
2047 status = I2O_POST_WAIT_OK;
2048 }
2049 if(!(context & 0x40000000)) {
2050 cmd = (struct scsi_cmnd*) readl(reply+12);
2051 if(cmd != NULL) {
2052 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2053 }
2054 }
2055 adpt_i2o_post_wait_complete(context, status);
2056 } else { // SCSI message
2057 cmd = (struct scsi_cmnd*) readl(reply+12);
2058 if(cmd != NULL){
2059				if(cmd->serial_number != 0) { // If not timed out
2060 adpt_i2o_to_scsi(reply, cmd);
2061 }
2062 }
2063 }
2064 writel(m, pHba->reply_port);
2065 wmb();
2066 rmb();
2067 }
2068 handled = 1;
2069out: if(pHba->host)
2070 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2071 return IRQ_RETVAL(handled);
2072}
2073
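/*
 * Build an I2O_CMD_SCSI_EXEC (DPT private) message from a scsi_cmnd,
 * including the CDB and a DMA-mapped scatter/gather list, and post it
 * to the controller.
 */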
2074static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2075{
2076 int i;
2077 u32 msg[MAX_MESSAGE_SIZE];
2078 u32* mptr;
2079 u32 *lenptr;
2080 int direction;
2081 int scsidir;
2082 u32 len;
2083 u32 reqlen;
2084 s32 rcode;
2085
2086 memset(msg, 0 , sizeof(msg));
2087 len = cmd->request_bufflen;
2088 direction = 0x00000000;
2089
2090 scsidir = 0x00000000; // DATA NO XFER
2091 if(len) {
2092 /*
2093 * Set SCBFlags to indicate if data is being transferred
2094 * in or out, or no data transfer
2095		 * Note: no need to check for a negative index since
2096 * cmd->cmnd[0] is an unsigned char
2097 */
2098 switch(cmd->sc_data_direction){
2099 case DMA_FROM_DEVICE:
2100 scsidir =0x40000000; // DATA IN (iop<--dev)
2101 break;
2102 case DMA_TO_DEVICE:
2103 direction=0x04000000; // SGL OUT
2104 scsidir =0x80000000; // DATA OUT (iop-->dev)
2105 break;
2106 case DMA_NONE:
2107 break;
2108 case DMA_BIDIRECTIONAL:
2109 scsidir =0x40000000; // DATA IN (iop<--dev)
2110 // Assume In - and continue;
2111 break;
2112 default:
2113 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2114 pHba->name, cmd->cmnd[0]);
2115 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2116 cmd->scsi_done(cmd);
2117 return 0;
2118 }
2119 }
2120 // msg[0] is set later
2121 // I2O_CMD_SCSI_EXEC
2122 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2123 msg[2] = 0;
2124 msg[3] = (u32)cmd; /* We want the SCSI control block back */
2125 // Our cards use the transaction context as the tag for queueing
2126 // Adaptec/DPT Private stuff
2127 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2128 msg[5] = d->tid;
2129 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2130 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2131 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2132 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2133 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2134
2135 mptr=msg+7;
2136
2137 // Write SCSI command into the message - always 16 byte block
2138 memset(mptr, 0, 16);
2139 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2140 mptr+=4;
2141 lenptr=mptr++; /* Remember me - fill in when we know */
2142 reqlen = 14; // SINGLE SGE
2143 /* Now fill in the SGList and command */
2144 if(cmd->use_sg) {
2145 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
2146 int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
2147 cmd->sc_data_direction);
2148
2149
2150 len = 0;
2151 for(i = 0 ; i < sg_count; i++) {
2152 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2153 len+=sg_dma_len(sg);
2154 *mptr++ = sg_dma_address(sg);
2155 sg++;
2156 }
2157 /* Make this an end of list */
2158 mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
2159 reqlen = mptr - msg;
2160 *lenptr = len;
2161
2162 if(cmd->underflow && len != cmd->underflow){
2163 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2164 len, cmd->underflow);
2165 }
2166 } else {
2167 *lenptr = len = cmd->request_bufflen;
2168 if(len == 0) {
2169 reqlen = 12;
2170 } else {
2171 *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
2172 *mptr++ = pci_map_single(pHba->pDev,
2173 cmd->request_buffer,
2174 cmd->request_bufflen,
2175 cmd->sc_data_direction);
2176 }
2177 }
2178
2179 /* Stick the headers on */
2180 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2181
2182	// Send it on its way
2183 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2184 if (rcode == 0) {
2185 return 0;
2186 }
2187 return rcode;
2188}
2189
2190
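/*
 * Register a Scsi_Host for this adapter and copy the controller limits
 * (queue depth, SG table size, max channel) into it.
 */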
2191static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
2192{
2193 struct Scsi_Host *host = NULL;
2194
2195 host = scsi_register(sht, sizeof(adpt_hba*));
2196 if (host == NULL) {
2197 printk ("%s: scsi_register returned NULL\n",pHba->name);
2198 return -1;
2199 }
2200 host->hostdata[0] = (unsigned long)pHba;
2201 pHba->host = host;
2202
2203 host->irq = pHba->pDev->irq;
2204 /* no IO ports, so don't have to set host->io_port and
2205 * host->n_io_port
2206 */
2207 host->io_port = 0;
2208 host->n_io_port = 0;
Hennede77aaf2006-10-04 10:22:09 +02002209 /* see comments in scsi_host.h */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 host->max_id = 16;
2211 host->max_lun = 256;
2212 host->max_channel = pHba->top_scsi_channel + 1;
2213 host->cmd_per_lun = 1;
2214 host->unique_id = (uint) pHba;
2215 host->sg_tablesize = pHba->sg_tablesize;
2216 host->can_queue = pHba->post_fifo_size;
2217
2218 return 0;
2219}
2220
2221
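/*
 * Translate an I2O reply frame into SCSI midlayer status: map the
 * detailed status codes onto DID_* results, copy back sense data on a
 * check condition, then complete the command.
 */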
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002222static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223{
2224 adpt_hba* pHba;
2225 u32 hba_status;
2226 u32 dev_status;
2227 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2228 // I know this would look cleaner if I just read bytes
2229 // but the model I have been using for all the rest of the
2230 // io is in 4 byte words - so I keep that model
2231 u16 detailed_status = readl(reply+16) &0xffff;
2232 dev_status = (detailed_status & 0xff);
2233 hba_status = detailed_status >> 8;
2234
2235 // calculate resid for sg
2236 cmd->resid = cmd->request_bufflen - readl(reply+5);
2237
2238 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2239
2240 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2241
2242 if(!(reply_flags & MSG_FAIL)) {
2243 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2244 case I2O_SCSI_DSC_SUCCESS:
2245 cmd->result = (DID_OK << 16);
2246 // handle underflow
2247 if(readl(reply+5) < cmd->underflow ) {
2248 cmd->result = (DID_ERROR <<16);
2249 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2250 }
2251 break;
2252 case I2O_SCSI_DSC_REQUEST_ABORTED:
2253 cmd->result = (DID_ABORT << 16);
2254 break;
2255 case I2O_SCSI_DSC_PATH_INVALID:
2256 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2257 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2258 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2259 case I2O_SCSI_DSC_NO_ADAPTER:
2260 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2261 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2262 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2263 cmd->result = (DID_TIME_OUT << 16);
2264 break;
2265 case I2O_SCSI_DSC_ADAPTER_BUSY:
2266 case I2O_SCSI_DSC_BUS_BUSY:
2267 cmd->result = (DID_BUS_BUSY << 16);
2268 break;
2269 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2270 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2271 cmd->result = (DID_RESET << 16);
2272 break;
2273 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2274 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2275 cmd->result = (DID_PARITY << 16);
2276 break;
2277 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2278 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2279 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2280 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2281 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2282 case I2O_SCSI_DSC_DATA_OVERRUN:
2283 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2284 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2285 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2286 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2287 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2288 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2289 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2290 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2291 case I2O_SCSI_DSC_INVALID_CDB:
2292 case I2O_SCSI_DSC_LUN_INVALID:
2293 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2294 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2295 case I2O_SCSI_DSC_NO_NEXUS:
2296 case I2O_SCSI_DSC_CDB_RECEIVED:
2297 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2298 case I2O_SCSI_DSC_QUEUE_FROZEN:
2299 case I2O_SCSI_DSC_REQUEST_INVALID:
2300 default:
2301 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2302 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2303 hba_status, dev_status, cmd->cmnd[0]);
2304 cmd->result = (DID_ERROR << 16);
2305 break;
2306 }
2307
2308 // copy over the request sense data if it was a check
2309 // condition status
2310 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2311 u32 len = sizeof(cmd->sense_buffer);
2312 len = (len > 40) ? 40 : len;
2313 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002314 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2316 cmd->sense_buffer[2] == DATA_PROTECT ){
2317				/* This handles a failed array */
2318 cmd->result = (DID_TIME_OUT << 16);
2319 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2320 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2321 hba_status, dev_status, cmd->cmnd[0]);
2322
2323 }
2324 }
2325 } else {
2326		/* In this condition we could not talk to the tid -
2327		 * the card rejected it. We should signal a retry
2328		 * for a limited number of retries.
2329 */
2330 cmd->result = (DID_TIME_OUT << 16);
2331 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2332 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2333 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2334 }
2335
2336 cmd->result |= (dev_status);
2337
2338 if(cmd->scsi_done != NULL){
2339 cmd->scsi_done(cmd);
2340 }
2341 return cmd->result;
2342}
2343
2344
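/*
 * Re-read the logical configuration table and reparse it so devices
 * added or removed behind the controller are picked up.
 */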
2345static s32 adpt_rescan(adpt_hba* pHba)
2346{
2347 s32 rcode;
2348 ulong flags = 0;
2349
2350 if(pHba->host)
2351 spin_lock_irqsave(pHba->host->host_lock, flags);
2352 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2353 goto out;
2354 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2355 goto out;
2356 rcode = 0;
2357out: if(pHba->host)
2358 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2359 return rcode;
2360}
2361
2362
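/*
 * Walk the freshly read LCT: add adpt_device entries for new targets,
 * bring back devices that reappeared, and mark devices no longer listed
 * as offline.
 */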
2363static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2364{
2365 int i;
2366 int max;
2367 int tid;
2368 struct i2o_device *d;
2369 i2o_lct *lct = pHba->lct;
2370 u8 bus_no = 0;
2371 s16 scsi_id;
2372 s16 scsi_lun;
2373 u32 buf[10]; // at least 8 u32's
2374 struct adpt_device* pDev = NULL;
2375 struct i2o_device* pI2o_dev = NULL;
2376
2377 if (lct == NULL) {
2378 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2379 return -1;
2380 }
2381
2382 max = lct->table_size;
2383 max -= 3;
2384 max /= 9;
2385
2386 // Mark each drive as unscanned
2387 for (d = pHba->devices; d; d = d->next) {
2388 pDev =(struct adpt_device*) d->owner;
2389 if(!pDev){
2390 continue;
2391 }
2392 pDev->state |= DPTI_DEV_UNSCANNED;
2393 }
2394
2395 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2396
2397 for(i=0;i<max;i++) {
2398 if( lct->lct_entry[i].user_tid != 0xfff){
2399 continue;
2400 }
2401
2402 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2403 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2404 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2405 tid = lct->lct_entry[i].tid;
2406 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2407 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2408 continue;
2409 }
2410 bus_no = buf[0]>>16;
2411 scsi_id = buf[1];
2412 scsi_lun = (buf[2]>>8 )&0xff;
2413 pDev = pHba->channel[bus_no].device[scsi_id];
2414			/* find the matching LUN */
2415 while(pDev) {
2416 if(pDev->scsi_lun == scsi_lun) {
2417 break;
2418 }
2419 pDev = pDev->next_lun;
2420 }
2421			if(!pDev ) { // Something new - add it
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002422 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 if(d==NULL)
2424 {
2425 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2426 return -ENOMEM;
2427 }
2428
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002429 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 d->next = NULL;
2431
2432 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2433
2434 d->flags = 0;
2435 adpt_i2o_report_hba_unit(pHba, d);
2436 adpt_i2o_install_device(pHba, d);
2437
2438				if(bus_no >= MAX_CHANNEL) {	// Something wrong - skip it
2439 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2440 continue;
2441 }
2442 pDev = pHba->channel[bus_no].device[scsi_id];
2443 if( pDev == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302444 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 if(pDev == NULL) {
2446 return -ENOMEM;
2447 }
2448 pHba->channel[bus_no].device[scsi_id] = pDev;
2449 } else {
2450 while (pDev->next_lun) {
2451 pDev = pDev->next_lun;
2452 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302453 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 if(pDev == NULL) {
2455 return -ENOMEM;
2456 }
2457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 pDev->tid = d->lct_data.tid;
2459 pDev->scsi_channel = bus_no;
2460 pDev->scsi_id = scsi_id;
2461 pDev->scsi_lun = scsi_lun;
2462 pDev->pI2o_dev = d;
2463 d->owner = pDev;
2464 pDev->type = (buf[0])&0xff;
2465 pDev->flags = (buf[0]>>8)&0xff;
2466			// Too late, SCSI system has made up its mind, but what the hey ...
2467 if(scsi_id > pHba->top_scsi_id){
2468 pHba->top_scsi_id = scsi_id;
2469 }
2470 if(scsi_lun > pHba->top_scsi_lun){
2471 pHba->top_scsi_lun = scsi_lun;
2472 }
2473 continue;
2474 } // end of new i2o device
2475
2476 // We found an old device - check it
2477 while(pDev) {
2478 if(pDev->scsi_lun == scsi_lun) {
2479 if(!scsi_device_online(pDev->pScsi_dev)) {
2480 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2481 pHba->name,bus_no,scsi_id,scsi_lun);
2482 if (pDev->pScsi_dev) {
2483 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2484 }
2485 }
2486 d = pDev->pI2o_dev;
2487 if(d->lct_data.tid != tid) { // something changed
2488 pDev->tid = tid;
2489 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2490 if (pDev->pScsi_dev) {
2491 pDev->pScsi_dev->changed = TRUE;
2492 pDev->pScsi_dev->removable = TRUE;
2493 }
2494 }
2495 // Found it - mark it scanned
2496 pDev->state = DPTI_DEV_ONLINE;
2497 break;
2498 }
2499 pDev = pDev->next_lun;
2500 }
2501 }
2502 }
2503 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2504 pDev =(struct adpt_device*) pI2o_dev->owner;
2505 if(!pDev){
2506 continue;
2507 }
2508 // Drive offline drives that previously existed but could not be found
2509		// Mark drives offline that previously existed but could not be found
2510		// in the LCT table
2511 pDev->state = DPTI_DEV_OFFLINE;
2512 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2513 if (pDev->pScsi_dev) {
2514 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2515 }
2516 }
2517 }
2518 return 0;
2519}
2520
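/*
 * Complete every command still outstanding on this adapter with a
 * QUEUE_FULL status so the midlayer will requeue it.
 */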
2521static void adpt_fail_posted_scbs(adpt_hba* pHba)
2522{
2523 struct scsi_cmnd* cmd = NULL;
2524 struct scsi_device* d = NULL;
2525
2526 shost_for_each_device(d, pHba->host) {
2527 unsigned long flags;
2528 spin_lock_irqsave(&d->list_lock, flags);
2529 list_for_each_entry(cmd, &d->cmd_list, list) {
2530 if(cmd->serial_number == 0){
2531 continue;
2532 }
2533 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2534 cmd->scsi_done(cmd);
2535 }
2536 spin_unlock_irqrestore(&d->list_lock, flags);
2537 }
2538}
2539
2540
2541/*============================================================================
2542 * Routines from i2o subsystem
2543 *============================================================================
2544 */
2545
2546
2547
2548/*
2549 * Bring an I2O controller into HOLD state. See the spec.
2550 */
2551static int adpt_i2o_activate_hba(adpt_hba* pHba)
2552{
2553 int rcode;
2554
2555 if(pHba->initialized ) {
2556 if (adpt_i2o_status_get(pHba) < 0) {
2557 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2558 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2559 return rcode;
2560 }
2561 if (adpt_i2o_status_get(pHba) < 0) {
2562 printk(KERN_INFO "HBA not responding.\n");
2563 return -1;
2564 }
2565 }
2566
2567 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2568 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2569 return -1;
2570 }
2571
2572 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2573 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2574 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2575 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2576 adpt_i2o_reset_hba(pHba);
2577 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2578 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2579 return -1;
2580 }
2581 }
2582 } else {
2583 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2584 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2585 return rcode;
2586 }
2587
2588 }
2589
2590 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2591 return -1;
2592 }
2593
2594 /* In HOLD state */
2595
2596 if (adpt_i2o_hrt_get(pHba) < 0) {
2597 return -1;
2598 }
2599
2600 return 0;
2601}
2602
2603/*
2604 * Bring a controller online into OPERATIONAL state.
2605 */
2606
2607static int adpt_i2o_online_hba(adpt_hba* pHba)
2608{
2609 if (adpt_i2o_systab_send(pHba) < 0) {
2610 adpt_i2o_delete_hba(pHba);
2611 return -1;
2612 }
2613 /* In READY state */
2614
2615 if (adpt_i2o_enable_hba(pHba) < 0) {
2616 adpt_i2o_delete_hba(pHba);
2617 return -1;
2618 }
2619
2620 /* In OPERATIONAL state */
2621 return 0;
2622}
2623
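/*
 * Post a UTIL_NOP in the given message frame (waiting up to five seconds
 * for a free frame if none was supplied); used to hand an unused or
 * failed frame back to the IOP.
 */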
2624static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2625{
2626 u32 __iomem *msg;
2627 ulong timeout = jiffies + 5*HZ;
2628
2629 while(m == EMPTY_QUEUE){
2630 rmb();
2631 m = readl(pHba->post_port);
2632 if(m != EMPTY_QUEUE){
2633 break;
2634 }
2635 if(time_after(jiffies,timeout)){
2636 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2637 return 2;
2638 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002639 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 }
2641 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2642 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2643 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2644 writel( 0,&msg[2]);
2645 wmb();
2646
2647 writel(m, pHba->post_port);
2648 wmb();
2649 return 0;
2650}
2651
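/*
 * Initialize the outbound (reply) queue: send EXEC_OUTBOUND_INIT, wait
 * for the status byte to show completion, then allocate the reply pool
 * and prime the reply FIFO with its frame addresses.
 */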
2652static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2653{
2654 u8 *status;
2655 u32 __iomem *msg = NULL;
2656 int i;
2657 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2658 u32* ptr;
2659 u32 outbound_frame; // This had to be a 32 bit address
2660 u32 m;
2661
2662 do {
2663 rmb();
2664 m = readl(pHba->post_port);
2665 if (m != EMPTY_QUEUE) {
2666 break;
2667 }
2668
2669 if(time_after(jiffies,timeout)){
2670 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2671 return -ETIMEDOUT;
2672 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002673 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 } while(m == EMPTY_QUEUE);
2675
2676 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2677
2678 status = kmalloc(4,GFP_KERNEL|ADDR32);
2679 if (status==NULL) {
2680 adpt_send_nop(pHba, m);
2681 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2682 pHba->name);
2683 return -ENOMEM;
2684 }
2685 memset(status, 0, 4);
2686
2687 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2688 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2689 writel(0, &msg[2]);
2690 writel(0x0106, &msg[3]); /* Transaction context */
2691 writel(4096, &msg[4]); /* Host page frame size */
2692 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2693 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2694 writel(virt_to_bus(status), &msg[7]);
2695
2696 writel(m, pHba->post_port);
2697 wmb();
2698
2699 // Wait for the reply status to come back
2700 do {
2701 if (*status) {
2702 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2703 break;
2704 }
2705 }
2706 rmb();
2707 if(time_after(jiffies,timeout)){
2708 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2709 return -ETIMEDOUT;
2710 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002711 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 } while (1);
2713
2714 // If the command was successful, fill the fifo with our reply
2715 // message packets
2716 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002717 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 return -2;
2719 }
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002720 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002722 kfree(pHba->reply_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002724 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 if(!pHba->reply_pool){
2726 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2727 return -1;
2728 }
2729 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2730
2731 ptr = pHba->reply_pool;
2732 for(i = 0; i < pHba->reply_fifo_size; i++) {
2733 outbound_frame = (u32)virt_to_bus(ptr);
2734 writel(outbound_frame, pHba->reply_port);
2735 wmb();
2736 ptr += REPLY_FRAME_SIZE;
2737 }
2738 adpt_i2o_status_get(pHba);
2739 return 0;
2740}
2741
2742
2743/*
2744 * I2O System Table. Contains information about
2745 * all the IOPs in the system. Used to inform IOPs
2746 * about each other's existence.
2747 *
2748 * change_ind is the CurrentChangeIndicator that is
2749 * used by IOPs to track changes.
2750 */
2751
2752
2753
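/*
 * Fetch the IOP status block and derive the inbound/outbound FIFO depths
 * and the scatter/gather table size from it.
 */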
2754static s32 adpt_i2o_status_get(adpt_hba* pHba)
2755{
2756 ulong timeout;
2757 u32 m;
2758 u32 __iomem *msg;
2759 u8 *status_block=NULL;
2760 ulong status_block_bus;
2761
2762 if(pHba->status_block == NULL) {
2763 pHba->status_block = (i2o_status_block*)
2764 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2765 if(pHba->status_block == NULL) {
2766 printk(KERN_ERR
2767 "dpti%d: Get Status Block failed; Out of memory. \n",
2768 pHba->unit);
2769 return -ENOMEM;
2770 }
2771 }
2772 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2773 status_block = (u8*)(pHba->status_block);
2774 status_block_bus = virt_to_bus(pHba->status_block);
2775 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2776 do {
2777 rmb();
2778 m = readl(pHba->post_port);
2779 if (m != EMPTY_QUEUE) {
2780 break;
2781 }
2782 if(time_after(jiffies,timeout)){
2783 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2784 pHba->name);
2785 return -ETIMEDOUT;
2786 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002787 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 } while(m==EMPTY_QUEUE);
2789
2790
2791 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2792
2793 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2794 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2795 writel(1, &msg[2]);
2796 writel(0, &msg[3]);
2797 writel(0, &msg[4]);
2798 writel(0, &msg[5]);
2799 writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2800 writel(0, &msg[7]);
2801 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2802
2803 //post message
2804 writel(m, pHba->post_port);
2805 wmb();
2806
2807 while(status_block[87]!=0xff){
2808 if(time_after(jiffies,timeout)){
2809 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2810 pHba->unit);
2811 return -ETIMEDOUT;
2812 }
2813 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002814 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 }
2816
2817 // Set up our number of outbound and inbound messages
2818 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2819 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2820 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2821 }
2822
2823 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2824 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2825 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2826 }
2827
2828 // Calculate the Scatter Gather list size
2829 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2830 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2831 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2832 }
2833
2834
2835#ifdef DEBUG
2836 printk("dpti%d: State = ",pHba->unit);
2837 switch(pHba->status_block->iop_state) {
2838 case 0x01:
2839 printk("INIT\n");
2840 break;
2841 case 0x02:
2842 printk("RESET\n");
2843 break;
2844 case 0x04:
2845 printk("HOLD\n");
2846 break;
2847 case 0x05:
2848 printk("READY\n");
2849 break;
2850 case 0x08:
2851 printk("OPERATIONAL\n");
2852 break;
2853 case 0x10:
2854 printk("FAILED\n");
2855 break;
2856 case 0x11:
2857 printk("FAULTED\n");
2858 break;
2859 default:
2860 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2861 }
2862#endif
2863 return 0;
2864}
2865
2866/*
2867 * Get the IOP's Logical Configuration Table
2868 */
2869static int adpt_i2o_lct_get(adpt_hba* pHba)
2870{
2871 u32 msg[8];
2872 int ret;
2873 u32 buf[16];
2874
2875 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2876 pHba->lct_size = pHba->status_block->expected_lct_size;
2877 }
2878 do {
2879 if (pHba->lct == NULL) {
2880 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2881 if(pHba->lct == NULL) {
2882 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2883 pHba->name);
2884 return -ENOMEM;
2885 }
2886 }
2887 memset(pHba->lct, 0, pHba->lct_size);
2888
2889 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2890 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2891 msg[2] = 0;
2892 msg[3] = 0;
2893 msg[4] = 0xFFFFFFFF; /* All devices */
2894 msg[5] = 0x00000000; /* Report now */
2895 msg[6] = 0xD0000000|pHba->lct_size;
2896 msg[7] = virt_to_bus(pHba->lct);
2897
2898 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2899			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2900 pHba->name, ret);
2901 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2902 return ret;
2903 }
2904
2905 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2906 pHba->lct_size = pHba->lct->table_size << 2;
2907 kfree(pHba->lct);
2908 pHba->lct = NULL;
2909 }
2910 } while (pHba->lct == NULL);
2911
2912 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2913
2914
2915 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2916 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2917 pHba->FwDebugBufferSize = buf[1];
2918 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
2919 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2920 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2921 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
2922 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2923 pHba->FwDebugBuffer_P += buf[2];
2924 pHba->FwDebugFlags = 0;
2925 }
2926
2927 return 0;
2928}
2929
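/*
 * Build the I2O system table describing every registered adapter; it is
 * pushed to each IOP by adpt_i2o_systab_send().
 */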
2930static int adpt_i2o_build_sys_table(void)
2931{
2932 adpt_hba* pHba = NULL;
2933 int count = 0;
2934
2935 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2936 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2937
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002938 kfree(sys_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939
2940 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2941 if(!sys_tbl) {
2942 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2943 return -ENOMEM;
2944 }
2945 memset(sys_tbl, 0, sys_tbl_len);
2946
2947 sys_tbl->num_entries = hba_count;
2948 sys_tbl->version = I2OVERSION;
2949 sys_tbl->change_ind = sys_tbl_ind++;
2950
2951 for(pHba = hba_chain; pHba; pHba = pHba->next) {
2952 // Get updated Status Block so we have the latest information
2953 if (adpt_i2o_status_get(pHba)) {
2954 sys_tbl->num_entries--;
2955 continue; // try next one
2956 }
2957
2958 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2959 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2960 sys_tbl->iops[count].seg_num = 0;
2961 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2962 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2963 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2964 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2965 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2966 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002967 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2968 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969
2970 count++;
2971 }
2972
2973#ifdef DEBUG
2974{
2975 u32 *table = (u32*)sys_tbl;
2976 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2977 for(count = 0; count < (sys_tbl_len >>2); count++) {
2978 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2979 count, table[count]);
2980 }
2981}
2982#endif
2983
2984 return 0;
2985}
2986
2987
2988/*
2989 * Dump the information block associated with a given unit (TID)
2990 */
2991
2992static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2993{
2994 char buf[64];
2995 int unit = d->lct_data.tid;
2996
2997 printk(KERN_INFO "TID %3.3d ", unit);
2998
2999 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3000 {
3001 buf[16]=0;
3002 printk(" Vendor: %-12.12s", buf);
3003 }
3004 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3005 {
3006 buf[16]=0;
3007 printk(" Device: %-12.12s", buf);
3008 }
3009 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3010 {
3011 buf[8]=0;
3012 printk(" Rev: %-12.12s\n", buf);
3013 }
3014#ifdef DEBUG
3015 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3016 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3017 printk(KERN_INFO "\tFlags: ");
3018
3019 if(d->lct_data.device_flags&(1<<0))
3020 printk("C"); // ConfigDialog requested
3021 if(d->lct_data.device_flags&(1<<1))
3022 printk("U"); // Multi-user capable
3023 if(!(d->lct_data.device_flags&(1<<4)))
3024 printk("P"); // Peer service enabled!
3025 if(!(d->lct_data.device_flags&(1<<5)))
3026 printk("M"); // Mgmt service enabled!
3027 printk("\n");
3028#endif
3029}
3030
3031#ifdef DEBUG
3032/*
3033 * Do i2o class name lookup
3034 */
3035static const char *adpt_i2o_get_class_name(int class)
3036{
3037 int idx = 16;
3038 static char *i2o_class_name[] = {
3039 "Executive",
3040 "Device Driver Module",
3041 "Block Device",
3042 "Tape Device",
3043 "LAN Interface",
3044 "WAN Interface",
3045 "Fibre Channel Port",
3046 "Fibre Channel Device",
3047 "SCSI Device",
3048 "ATE Port",
3049 "ATE Device",
3050 "Floppy Controller",
3051 "Floppy Device",
3052 "Secondary Bus Port",
3053 "Peer Transport Agent",
3054 "Peer Transport",
3055 "Unknown"
3056 };
3057
3058 switch(class&0xFFF) {
3059 case I2O_CLASS_EXECUTIVE:
3060 idx = 0; break;
3061 case I2O_CLASS_DDM:
3062 idx = 1; break;
3063 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3064 idx = 2; break;
3065 case I2O_CLASS_SEQUENTIAL_STORAGE:
3066 idx = 3; break;
3067 case I2O_CLASS_LAN:
3068 idx = 4; break;
3069 case I2O_CLASS_WAN:
3070 idx = 5; break;
3071 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3072 idx = 6; break;
3073 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3074 idx = 7; break;
3075 case I2O_CLASS_SCSI_PERIPHERAL:
3076 idx = 8; break;
3077 case I2O_CLASS_ATE_PORT:
3078 idx = 9; break;
3079 case I2O_CLASS_ATE_PERIPHERAL:
3080 idx = 10; break;
3081 case I2O_CLASS_FLOPPY_CONTROLLER:
3082 idx = 11; break;
3083 case I2O_CLASS_FLOPPY_DEVICE:
3084 idx = 12; break;
3085 case I2O_CLASS_BUS_ADAPTER_PORT:
3086 idx = 13; break;
3087 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3088 idx = 14; break;
3089 case I2O_CLASS_PEER_TRANSPORT:
3090 idx = 15; break;
3091 }
3092 return i2o_class_name[idx];
3093}
3094#endif
3095
3096
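/*
 * Read the IOP's Hardware Resource Table, growing the buffer and
 * retrying if the first allocation turns out to be too small.
 */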
3097static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3098{
3099 u32 msg[6];
3100 int ret, size = sizeof(i2o_hrt);
3101
3102 do {
3103 if (pHba->hrt == NULL) {
3104 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3105 if (pHba->hrt == NULL) {
3106 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3107 return -ENOMEM;
3108 }
3109 }
3110
3111 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3112 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3113 msg[2]= 0;
3114 msg[3]= 0;
3115 msg[4]= (0xD0000000 | size); /* Simple transaction */
3116 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3117
3118 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3119 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3120 return ret;
3121 }
3122
3123 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3124 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3125 kfree(pHba->hrt);
3126 pHba->hrt = NULL;
3127 }
3128 } while(pHba->hrt == NULL);
3129 return 0;
3130}
3131
3132/*
3133 * Query one scalar group value or a whole scalar group.
3134 */
3135static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3136 int group, int field, void *buf, int buflen)
3137{
3138 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3139 u8 *resblk;
3140
3141 int size;
3142
3143 /* 8 bytes for header */
3144 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3145 if (resblk == NULL) {
3146 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3147 return -ENOMEM;
3148 }
3149
3150 if (field == -1) /* whole group */
3151 opblk[4] = -1;
3152
3153 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3154 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3155 if (size == -ETIME) {
3156 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3157 return -ETIME;
3158 } else if (size == -EINTR) {
3159 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3160 return -EINTR;
3161 }
3162
3163 memcpy(buf, resblk+8, buflen); /* cut off header */
3164
3165 kfree(resblk);
3166 if (size < 0)
3167 return size;
3168
3169 return buflen;
3170}
3171
3172
3173/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3174 *
3175 * This function can be used for all UtilParamsGet/Set operations.
3176 * The OperationBlock is given in opblk-buffer,
3177 * and results are returned in resblk-buffer.
3178 * Note that the minimum sized resblk is 8 bytes and contains
3179 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3180 */
3181static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3182 void *opblk, int oplen, void *resblk, int reslen)
3183{
3184 u32 msg[9];
3185 u32 *res = (u32 *)resblk;
3186 int wait_status;
3187
3188 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3189 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3190 msg[2] = 0;
3191 msg[3] = 0;
3192 msg[4] = 0;
3193 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3194 msg[6] = virt_to_bus(opblk);
3195 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3196 msg[8] = virt_to_bus(resblk);
3197
3198 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3199 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3200 return wait_status; /* -DetailedStatus */
3201 }
3202
3203 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3204 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3205 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3206 pHba->name,
3207 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3208 : "PARAMS_GET",
3209 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3210 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3211 }
3212
3213 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3214}
3215
3216
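/*
 * Ask the IOP to quiesce (stop accepting new work); only attempted from
 * the READY or OPERATIONAL states.
 */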
3217static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3218{
3219 u32 msg[4];
3220 int ret;
3221
3222 adpt_i2o_status_get(pHba);
3223
3224 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3225
3226 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3227 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3228 return 0;
3229 }
3230
3231 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3232 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3233 msg[2] = 0;
3234 msg[3] = 0;
3235
3236 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3237 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3238 pHba->unit, -ret);
3239 } else {
3240 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3241 }
3242
3243 adpt_i2o_status_get(pHba);
3244 return ret;
3245}
3246
3247
3248/*
3249 * Enable IOP. Allows the IOP to resume external operations.
3250 */
3251static int adpt_i2o_enable_hba(adpt_hba* pHba)
3252{
3253 u32 msg[4];
3254 int ret;
3255
3256 adpt_i2o_status_get(pHba);
3257 if(!pHba->status_block){
3258 return -ENOMEM;
3259 }
3260 /* Enable only allowed on READY state */
3261 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3262 return 0;
3263
3264 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3265 return -EINVAL;
3266
3267 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3268 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3269 msg[2]= 0;
3270 msg[3]= 0;
3271
3272 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3273 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3274 pHba->name, ret);
3275 } else {
3276 PDEBUG("%s: Enabled.\n", pHba->name);
3277 }
3278
3279 adpt_i2o_status_get(pHba);
3280 return ret;
3281}
3282
3283
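/*
 * Send the system table built by adpt_i2o_build_sys_table() to this IOP
 * with an EXEC_SYS_TAB_SET request.
 */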
3284static int adpt_i2o_systab_send(adpt_hba* pHba)
3285{
3286 u32 msg[12];
3287 int ret;
3288
3289 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3290 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3291 msg[2] = 0;
3292 msg[3] = 0;
3293 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3294 msg[5] = 0; /* Segment 0 */
3295
3296 /*
3297 * Provide three SGL-elements:
3298 * System table (SysTab), Private memory space declaration and
3299 * Private i/o space declaration
3300 */
3301 msg[6] = 0x54000000 | sys_tbl_len;
3302 msg[7] = virt_to_phys(sys_tbl);
3303 msg[8] = 0x54000000 | 0;
3304 msg[9] = 0;
3305 msg[10] = 0xD4000000 | 0;
3306 msg[11] = 0;
3307
3308 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3309 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3310 pHba->name, ret);
3311 }
3312#ifdef DEBUG
3313 else {
3314 PINFO("%s: SysTab set.\n", pHba->name);
3315 }
3316#endif
3317
3318 return ret;
3319 }
3320
3321
3322/*============================================================================
3323 *
3324 *============================================================================
3325 */
3326
3327
3328#ifdef UARTDELAY
3329
3330static void adpt_delay(int millisec)
3331{
3332 int i;
3333 for (i = 0; i < millisec; i++) {
3334 udelay(1000); /* delay for one millisecond */
3335 }
3336}
3337
3338#endif
3339
3340static struct scsi_host_template driver_template = {
3341 .name = "dpt_i2o",
3342 .proc_name = "dpt_i2o",
3343 .proc_info = adpt_proc_info,
3344 .detect = adpt_detect,
3345 .release = adpt_release,
3346 .info = adpt_info,
3347 .queuecommand = adpt_queue,
3348 .eh_abort_handler = adpt_abort,
3349 .eh_device_reset_handler = adpt_device_reset,
3350 .eh_bus_reset_handler = adpt_bus_reset,
3351 .eh_host_reset_handler = adpt_reset,
3352 .bios_param = adpt_bios_param,
3353 .slave_configure = adpt_slave_configure,
3354 .can_queue = MAX_TO_IOP_MESSAGES,
3355 .this_id = 7,
3356 .cmd_per_lun = 1,
3357 .use_clustering = ENABLE_CLUSTERING,
3358};
3359#include "scsi_module.c"
3360MODULE_LICENSE("GPL");