 1/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
32/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
33 high pages. Keep the macro around because of the broken unmerged ia64 tree */
34
35#define ADDR32 (0)
36
 37#include <linux/module.h>
38
39MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
41
42////////////////////////////////////////////////////////////////
43
44#include <linux/ioctl.h> /* For SCSI-Passthrough */
45#include <asm/uaccess.h>
46
47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */
 49#include <linux/pci.h>		/* for PCI support */
50#include <linux/proc_fs.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h> /* for udelay */
53#include <linux/interrupt.h>
54#include <linux/kernel.h> /* for printk */
55#include <linux/sched.h>
56#include <linux/reboot.h>
57#include <linux/spinlock.h>
 58#include <linux/dma-mapping.h>
 59
60#include <linux/timer.h>
61#include <linux/string.h>
62#include <linux/ioport.h>
 63#include <linux/mutex.h>
 64
65#include <asm/processor.h> /* for boot_cpu_data */
66#include <asm/pgtable.h>
67#include <asm/io.h> /* for virt_to_bus, etc. */
68
69#include <scsi/scsi.h>
70#include <scsi/scsi_cmnd.h>
71#include <scsi/scsi_device.h>
72#include <scsi/scsi_host.h>
73#include <scsi/scsi_tcq.h>
74
75#include "dpt/dptsig.h"
76#include "dpti.h"
77
78/*============================================================================
79 * Create a binary signature - this is read by dptsig
80 * Needed for our management apps
81 *============================================================================
82 */
83static dpt_sig_S DPTI_sig = {
84 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
85#ifdef __i386__
86 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
87#elif defined(__ia64__)
88 PROC_INTEL, PROC_IA64,
89#elif defined(__sparc__)
90 PROC_ULTRASPARC, PROC_ULTRASPARC,
91#elif defined(__alpha__)
92 PROC_ALPHA, PROC_ALPHA,
93#else
94 (-1),(-1),
95#endif
96 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
97 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
98 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
99};
100
101
102
103
104/*============================================================================
105 * Globals
106 *============================================================================
107 */
108
 109static DEFINE_MUTEX(adpt_configuration_lock);
 110
111static struct i2o_sys_tbl *sys_tbl = NULL;
112static int sys_tbl_ind = 0;
113static int sys_tbl_len = 0;
114
 115static adpt_hba* hba_chain = NULL;
116static int hba_count = 0;
117
 118static const struct file_operations adpt_fops = {
 119	.ioctl = adpt_ioctl,
120 .open = adpt_open,
121 .release = adpt_close
122};
123
124#ifdef REBOOT_NOTIFIER
125static struct notifier_block adpt_reboot_notifier =
126{
127 adpt_reboot_event,
128 NULL,
129 0
130};
131#endif
132
133/* Structures and definitions for synchronous message posting.
134 * See adpt_i2o_post_wait() for description
135 * */
136struct adpt_i2o_post_wait_data
137{
138 int status;
139 u32 id;
140 adpt_wait_queue_head_t *wq;
141 struct adpt_i2o_post_wait_data *next;
142};
143
144static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
145static u32 adpt_post_wait_id = 0;
146static DEFINE_SPINLOCK(adpt_post_wait_lock);
147
148
149/*============================================================================
150 * Functions
151 *============================================================================
152 */
153
154static u8 adpt_read_blink_led(adpt_hba* host)
155{
156 if(host->FwDebugBLEDflag_P != 0) {
157 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
158 return readb(host->FwDebugBLEDvalue_P);
159 }
160 }
161 return 0;
162}
163
164/*============================================================================
165 * Scsi host template interface functions
166 *============================================================================
167 */
168
169static struct pci_device_id dptids[] = {
170 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
171 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
172 { 0, }
173};
174MODULE_DEVICE_TABLE(pci,dptids);
175
 176static void adpt_exit(void);
 177
 178static int adpt_detect(void)
 179{
180 struct pci_dev *pDev = NULL;
181 adpt_hba* pHba;
182
 183	PINFO("Detecting Adaptec I2O RAID controllers...\n");
184
 185	/* search for all Adaptec I2O RAID cards */
 186	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
 187		if(pDev->device == PCI_DPT_DEVICE_ID ||
188 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
 189			if(adpt_install_hba(pDev) ){
 190				PERROR("Could not Init an I2O RAID device\n");
191 PERROR("Will not try to detect others.\n");
192 return hba_count-1;
193 }
 194			pci_dev_get(pDev);
 195		}
196 }
197
198 /* In INIT state, Activate IOPs */
199 for (pHba = hba_chain; pHba; pHba = pHba->next) {
200 // Activate does get status , init outbound, and get hrt
201 if (adpt_i2o_activate_hba(pHba) < 0) {
202 adpt_i2o_delete_hba(pHba);
203 }
204 }
205
206
207 /* Active IOPs in HOLD state */
208
209rebuild_sys_tab:
210 if (hba_chain == NULL)
211 return 0;
212
213 /*
214 * If build_sys_table fails, we kill everything and bail
215 * as we can't init the IOPs w/o a system table
216 */
217 if (adpt_i2o_build_sys_table() < 0) {
218 adpt_i2o_sys_shutdown();
219 return 0;
220 }
221
222 PDEBUG("HBA's in HOLD state\n");
223
 224	/* If an IOP doesn't come online, we need to rebuild the System table */
225 for (pHba = hba_chain; pHba; pHba = pHba->next) {
226 if (adpt_i2o_online_hba(pHba) < 0) {
227 adpt_i2o_delete_hba(pHba);
228 goto rebuild_sys_tab;
229 }
230 }
231
232 /* Active IOPs now in OPERATIONAL state */
233 PDEBUG("HBA's in OPERATIONAL state\n");
234
235 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
236 for (pHba = hba_chain; pHba; pHba = pHba->next) {
237 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
238 if (adpt_i2o_lct_get(pHba) < 0){
239 adpt_i2o_delete_hba(pHba);
240 continue;
241 }
242
243 if (adpt_i2o_parse_lct(pHba) < 0){
244 adpt_i2o_delete_hba(pHba);
245 continue;
246 }
247 adpt_inquiry(pHba);
248 }
249
250 for (pHba = hba_chain; pHba; pHba = pHba->next) {
 251		if (adpt_scsi_register(pHba) < 0) {
 252			adpt_i2o_delete_hba(pHba);
253 continue;
254 }
255 pHba->initialized = TRUE;
256 pHba->state &= ~DPTI_STATE_RESET;
 257		scsi_scan_host(pHba->host);
 258	}
259
260 // Register our control device node
261 // nodes will need to be created in /dev to access this
262 // the nodes can not be created from within the driver
263 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
 264		adpt_exit();
 265		return 0;
266 }
267 return hba_count;
268}
269
270
 271static int adpt_release(adpt_hba *pHba)
 272{
 273	struct Scsi_Host *shost = pHba->host;
 274	scsi_remove_host(shost);
 275//	adpt_i2o_quiesce_hba(pHba);
 276	adpt_i2o_delete_hba(pHba);
 277	scsi_host_put(shost);
 278	return 0;
279}
280
281
282static void adpt_inquiry(adpt_hba* pHba)
283{
284 u32 msg[14];
285 u32 *mptr;
286 u32 *lenptr;
287 int direction;
288 int scsidir;
289 u32 len;
290 u32 reqlen;
291 u8* buf;
292 u8 scb[16];
293 s32 rcode;
294
295 memset(msg, 0, sizeof(msg));
 296	buf = kmalloc(80,GFP_KERNEL|ADDR32);
 297	if(!buf){
298 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
299 return;
300 }
301 memset((void*)buf, 0, 36);
302
303 len = 36;
304 direction = 0x00000000;
305 scsidir =0x40000000; // DATA IN (iop<--dev)
306
307 reqlen = 14; // SINGLE SGE
308 /* Stick the headers on */
309 msg[0] = reqlen<<16 | SGL_OFFSET_12;
310 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
311 msg[2] = 0;
312 msg[3] = 0;
313 // Adaptec/DPT Private stuff
314 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
315 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
316 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
317 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
318 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
319 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
320 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
321
322 mptr=msg+7;
323
324 memset(scb, 0, sizeof(scb));
325 // Write SCSI command into the message - always 16 byte block
326 scb[0] = INQUIRY;
327 scb[1] = 0;
328 scb[2] = 0;
329 scb[3] = 0;
330 scb[4] = 36;
331 scb[5] = 0;
332 // Don't care about the rest of scb
333
334 memcpy(mptr, scb, sizeof(scb));
335 mptr+=4;
336 lenptr=mptr++; /* Remember me - fill in when we know */
337
338 /* Now fill in the SGList and command */
339 *lenptr = len;
340 *mptr++ = 0xD0000000|direction|len;
341 *mptr++ = virt_to_bus(buf);
342
 343	// Send it on its way
344 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
345 if (rcode != 0) {
346 sprintf(pHba->detail, "Adaptec I2O RAID");
347 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
348 if (rcode != -ETIME && rcode != -EINTR)
349 kfree(buf);
350 } else {
351 memset(pHba->detail, 0, sizeof(pHba->detail));
352 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
353 memcpy(&(pHba->detail[16]), " Model: ", 8);
354 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
355 memcpy(&(pHba->detail[40]), " FW: ", 4);
356 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
357 pHba->detail[48] = '\0'; /* precautionary */
358 kfree(buf);
359 }
360 adpt_i2o_status_get(pHba);
361 return ;
362}
363
364
365static int adpt_slave_configure(struct scsi_device * device)
366{
367 struct Scsi_Host *host = device->host;
368 adpt_hba* pHba;
369
370 pHba = (adpt_hba *) host->hostdata[0];
371
372 if (host->can_queue && device->tagged_supported) {
373 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
374 host->can_queue - 1);
375 } else {
376 scsi_adjust_queue_depth(device, 0, 1);
377 }
378 return 0;
379}
380
381static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
382{
383 adpt_hba* pHba = NULL;
384 struct adpt_device* pDev = NULL; /* dpt per device information */
 385
386 cmd->scsi_done = done;
387 /*
388 * SCSI REQUEST_SENSE commands will be executed automatically by the
389 * Host Adapter for any errors, so they should not be executed
390 * explicitly unless the Sense Data is zero indicating that no error
391 * occurred.
392 */
393
394 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
395 cmd->result = (DID_OK << 16);
396 cmd->scsi_done(cmd);
397 return 0;
398 }
399
400 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
401 if (!pHba) {
402 return FAILED;
403 }
404
405 rmb();
406 /*
407 * TODO: I need to block here if I am processing ioctl cmds
408 * but if the outstanding cmds all finish before the ioctl,
409 * the scsi-core will not know to start sending cmds to me again.
 410	 * I need a way to restart the scsi-core's queues or should I block
411 * calling scsi_done on the outstanding cmds instead
412 * for now we don't set the IOCTL state
413 */
414 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
415 pHba->host->last_reset = jiffies;
416 pHba->host->resetting = 1;
417 return 1;
418 }
419
 420	// TODO if the cmd->device is offline then I may need to issue a bus rescan
421 // followed by a get_lct to see if the device is there anymore
422 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
423 /*
424 * First command request for this device. Set up a pointer
425 * to the device structure. This should be a TEST_UNIT_READY
426 * command from scan_scsis_single.
427 */
428 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
429 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
430 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
431 cmd->result = (DID_NO_CONNECT << 16);
432 cmd->scsi_done(cmd);
433 return 0;
434 }
435 cmd->device->hostdata = pDev;
436 }
437 pDev->pScsi_dev = cmd->device;
438
439 /*
440 * If we are being called from when the device is being reset,
441 * delay processing of the command until later.
442 */
443 if (pDev->state & DPTI_DEV_RESET ) {
444 return FAILED;
445 }
446 return adpt_scsi_to_i2o(pHba, cmd, pDev);
447}
448
449static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
450 sector_t capacity, int geom[])
451{
452 int heads=-1;
453 int sectors=-1;
454 int cylinders=-1;
455
456 // *** First lets set the default geometry ****
457
 458	// If the capacity is less than 0x2000
459 if (capacity < 0x2000 ) { // floppy
460 heads = 18;
461 sectors = 2;
462 }
463 // else if between 0x2000 and 0x20000
464 else if (capacity < 0x20000) {
465 heads = 64;
466 sectors = 32;
467 }
468 // else if between 0x20000 and 0x40000
469 else if (capacity < 0x40000) {
470 heads = 65;
471 sectors = 63;
472 }
 473	// else if between 0x40000 and 0x80000
474 else if (capacity < 0x80000) {
475 heads = 128;
476 sectors = 63;
477 }
478 // else if greater than 0x80000
479 else {
480 heads = 255;
481 sectors = 63;
482 }
483 cylinders = sector_div(capacity, heads * sectors);
484
485 // Special case if CDROM
486 if(sdev->type == 5) { // CDROM
487 heads = 252;
488 sectors = 63;
489 cylinders = 1111;
490 }
491
492 geom[0] = heads;
493 geom[1] = sectors;
494 geom[2] = cylinders;
495
496 PDEBUG("adpt_bios_param: exit\n");
497 return 0;
498}
499
500
501static const char *adpt_info(struct Scsi_Host *host)
502{
503 adpt_hba* pHba;
504
505 pHba = (adpt_hba *) host->hostdata[0];
506 return (char *) (pHba->detail);
507}
508
509static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
510 int length, int inout)
511{
512 struct adpt_device* d;
513 int id;
514 int chan;
515 int len = 0;
516 int begin = 0;
517 int pos = 0;
518 adpt_hba* pHba;
519 int unit;
520
521 *start = buffer;
522 if (inout == TRUE) {
523 /*
524 * The user has done a write and wants us to take the
525 * data in the buffer and do something with it.
526 * proc_scsiwrite calls us with inout = 1
527 *
528 * Read data from buffer (writing to us) - NOT SUPPORTED
529 */
530 return -EINVAL;
531 }
532
533 /*
534 * inout = 0 means the user has done a read and wants information
535 * returned, so we write information about the cards into the buffer
536 * proc_scsiread() calls us with inout = 0
537 */
538
539 // Find HBA (host bus adapter) we are looking for
 540	mutex_lock(&adpt_configuration_lock);
 541	for (pHba = hba_chain; pHba; pHba = pHba->next) {
542 if (pHba->host == host) {
543 break; /* found adapter */
544 }
545 }
 546	mutex_unlock(&adpt_configuration_lock);
 547	if (pHba == NULL) {
548 return 0;
549 }
550 host = pHba->host;
551
552 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
553 len += sprintf(buffer+len, "%s\n", pHba->detail);
554 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
555 pHba->host->host_no, pHba->name, host->irq);
556 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
557 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
558
559 pos = begin + len;
560
561 /* CHECKPOINT */
562 if(pos > offset + length) {
563 goto stop_output;
564 }
565 if(pos <= offset) {
566 /*
567 * If we haven't even written to where we last left
568 * off (the last time we were called), reset the
569 * beginning pointer.
570 */
571 len = 0;
572 begin = pos;
573 }
574 len += sprintf(buffer+len, "Devices:\n");
575 for(chan = 0; chan < MAX_CHANNEL; chan++) {
576 for(id = 0; id < MAX_ID; id++) {
577 d = pHba->channel[chan].device[id];
578 while(d){
579 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
580 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
581 pos = begin + len;
582
583
584 /* CHECKPOINT */
585 if(pos > offset + length) {
586 goto stop_output;
587 }
588 if(pos <= offset) {
589 len = 0;
590 begin = pos;
591 }
592
593 unit = d->pI2o_dev->lct_data.tid;
594 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
595 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
596 scsi_device_online(d->pScsi_dev)? "online":"offline");
597 pos = begin + len;
598
599 /* CHECKPOINT */
600 if(pos > offset + length) {
601 goto stop_output;
602 }
603 if(pos <= offset) {
604 len = 0;
605 begin = pos;
606 }
607
608 d = d->next_lun;
609 }
610 }
611 }
612
613 /*
614 * begin is where we last checked our position with regards to offset
615 * begin is always less than offset. len is relative to begin. It
616 * is the number of bytes written past begin
617 *
618 */
619stop_output:
620 /* stop the output and calculate the correct length */
621 *(buffer + len) = '\0';
622
623 *start = buffer + (offset - begin); /* Start of wanted data */
624 len -= (offset - begin);
625 if(len > length) {
626 len = length;
627 } else if(len < 0){
628 len = 0;
629 **start = '\0';
630 }
631 return len;
632}
633
634
635/*===========================================================================
636 * Error Handling routines
637 *===========================================================================
638 */
639
640static int adpt_abort(struct scsi_cmnd * cmd)
641{
642 adpt_hba* pHba = NULL; /* host bus adapter structure */
643 struct adpt_device* dptdevice; /* dpt per device information */
644 u32 msg[5];
645 int rcode;
646
647 if(cmd->serial_number == 0){
648 return FAILED;
649 }
650 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
651 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
652 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
653 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
654 return FAILED;
655 }
656
657 memset(msg, 0, sizeof(msg));
658 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
659 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
660 msg[2] = 0;
661 msg[3]= 0;
662 msg[4] = (u32)cmd;
 663	if (pHba->host)
664 spin_lock_irq(pHba->host->host_lock);
665 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
666 if (pHba->host)
667 spin_unlock_irq(pHba->host->host_lock);
668 if (rcode != 0) {
 669		if(rcode == -EOPNOTSUPP ){
670 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
671 return FAILED;
672 }
673 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
674 return FAILED;
675 }
676 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
677 return SUCCESS;
678}
679
680
681#define I2O_DEVICE_RESET 0x27
682// This is the same for BLK and SCSI devices
683// NOTE this is wrong in the i2o.h definitions
684// This is not currently supported by our adapter but we issue it anyway
685static int adpt_device_reset(struct scsi_cmnd* cmd)
686{
687 adpt_hba* pHba;
688 u32 msg[4];
689 u32 rcode;
690 int old_state;
 691	struct adpt_device* d = cmd->device->hostdata;
 692
693 pHba = (void*) cmd->device->host->hostdata[0];
694 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
695 if (!d) {
696 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
697 return FAILED;
698 }
699 memset(msg, 0, sizeof(msg));
700 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
701 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
702 msg[2] = 0;
703 msg[3] = 0;
704
 705	if (pHba->host)
706 spin_lock_irq(pHba->host->host_lock);
 707	old_state = d->state;
708 d->state |= DPTI_DEV_RESET;
 709	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
710 d->state = old_state;
711 if (pHba->host)
712 spin_unlock_irq(pHba->host->host_lock);
713 if (rcode != 0) {
 714		if(rcode == -EOPNOTSUPP ){
715 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
716 return FAILED;
717 }
718 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
719 return FAILED;
720 } else {
 721		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
722 return SUCCESS;
723 }
724}
725
726
727#define I2O_HBA_BUS_RESET 0x87
728// This version of bus reset is called by the eh_error handler
729static int adpt_bus_reset(struct scsi_cmnd* cmd)
730{
731 adpt_hba* pHba;
732 u32 msg[4];
 733	u32 rcode;
 734
735 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
736 memset(msg, 0, sizeof(msg));
737 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
738 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
739 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
740 msg[2] = 0;
741 msg[3] = 0;
 742	if (pHba->host)
743 spin_lock_irq(pHba->host->host_lock);
744 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
745 if (pHba->host)
746 spin_unlock_irq(pHba->host->host_lock);
747 if (rcode != 0) {
 748		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
749 return FAILED;
750 } else {
751 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
752 return SUCCESS;
753 }
754}
755
756// This version of reset is called by the eh_error_handler
 757static int __adpt_reset(struct scsi_cmnd* cmd)
 758{
759 adpt_hba* pHba;
760 int rcode;
761 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
762 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
763 rcode = adpt_hba_reset(pHba);
764 if(rcode == 0){
765 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
766 return SUCCESS;
767 } else {
768 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
769 return FAILED;
770 }
771}
772
 773static int adpt_reset(struct scsi_cmnd* cmd)
774{
775 int rc;
776
777 spin_lock_irq(cmd->device->host->host_lock);
778 rc = __adpt_reset(cmd);
779 spin_unlock_irq(cmd->device->host->host_lock);
780
781 return rc;
782}
783
 784// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
785static int adpt_hba_reset(adpt_hba* pHba)
786{
787 int rcode;
788
789 pHba->state |= DPTI_STATE_RESET;
790
791 // Activate does get status , init outbound, and get hrt
792 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
793 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
794 adpt_i2o_delete_hba(pHba);
795 return rcode;
796 }
797
798 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
799 adpt_i2o_delete_hba(pHba);
800 return rcode;
801 }
802 PDEBUG("%s: in HOLD state\n",pHba->name);
803
804 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
805 adpt_i2o_delete_hba(pHba);
806 return rcode;
807 }
808 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
809
810 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
811 adpt_i2o_delete_hba(pHba);
812 return rcode;
813 }
814
815 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
816 adpt_i2o_delete_hba(pHba);
817 return rcode;
818 }
819 pHba->state &= ~DPTI_STATE_RESET;
820
821 adpt_fail_posted_scbs(pHba);
822 return 0; /* return success */
823}
824
825/*===========================================================================
826 *
827 *===========================================================================
828 */
829
830
831static void adpt_i2o_sys_shutdown(void)
832{
833 adpt_hba *pHba, *pNext;
 834	struct adpt_i2o_post_wait_data *p1, *old;
 835
836 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
837 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
838 /* Delete all IOPs from the controller chain */
839 /* They should have already been released by the
840 * scsi-core
841 */
842 for (pHba = hba_chain; pHba; pHba = pNext) {
843 pNext = pHba->next;
844 adpt_i2o_delete_hba(pHba);
845 }
846
847 /* Remove any timedout entries from the wait queue. */
 848//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
849 /* Nothing should be outstanding at this point so just
850 * free them
851 */
 852	for(p1 = adpt_post_wait_queue; p1;) {
853 old = p1;
854 p1 = p1->next;
855 kfree(old);
 856	}
857// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
858 adpt_post_wait_queue = NULL;
859
860 printk(KERN_INFO "Adaptec I2O controllers down.\n");
861}
862
863/*
864 * reboot/shutdown notification.
865 *
866 * - Quiesce each IOP in the system
867 *
868 */
869
870#ifdef REBOOT_NOTIFIER
871static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
872{
873
874 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
875 return NOTIFY_DONE;
876
877 adpt_i2o_sys_shutdown();
878
879 return NOTIFY_DONE;
880}
881#endif
882
883
 884static int adpt_install_hba(struct pci_dev* pDev)
 885{
886
887 adpt_hba* pHba = NULL;
888 adpt_hba* p = NULL;
889 ulong base_addr0_phys = 0;
890 ulong base_addr1_phys = 0;
891 u32 hba_map0_area_size = 0;
892 u32 hba_map1_area_size = 0;
893 void __iomem *base_addr_virt = NULL;
894 void __iomem *msg_addr_virt = NULL;
895
896 int raptorFlag = FALSE;
 897
898 if(pci_enable_device(pDev)) {
899 return -EINVAL;
900 }
 901
902 if (pci_request_regions(pDev, "dpt_i2o")) {
903 PERROR("dpti: adpt_config_hba: pci request region failed\n");
904 return -EINVAL;
905 }
906
 907	pci_set_master(pDev);
 908	if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) &&
 909	    pci_set_dma_mask(pDev, DMA_32BIT_MASK))
 910		return -EINVAL;
911
912 base_addr0_phys = pci_resource_start(pDev,0);
913 hba_map0_area_size = pci_resource_len(pDev,0);
914
915 // Check if standard PCI card or single BAR Raptor
916 if(pDev->device == PCI_DPT_DEVICE_ID){
917 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
918 // Raptor card with this device id needs 4M
919 hba_map0_area_size = 0x400000;
920 } else { // Not Raptor - it is a PCI card
921 if(hba_map0_area_size > 0x100000 ){
922 hba_map0_area_size = 0x100000;
923 }
924 }
925 } else {// Raptor split BAR config
926 // Use BAR1 in this configuration
927 base_addr1_phys = pci_resource_start(pDev,1);
928 hba_map1_area_size = pci_resource_len(pDev,1);
929 raptorFlag = TRUE;
930 }
931
 932	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
933 if (!base_addr_virt) {
 934		pci_release_regions(pDev);
 935		PERROR("dpti: adpt_config_hba: io remap failed\n");
936 return -EINVAL;
937 }
938
939 if(raptorFlag == TRUE) {
940 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
941 if (!msg_addr_virt) {
942 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
943 iounmap(base_addr_virt);
 944			pci_release_regions(pDev);
 945			return -EINVAL;
946 }
947 } else {
948 msg_addr_virt = base_addr_virt;
949 }
950
951 // Allocate and zero the data structure
952 pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
953 if( pHba == NULL) {
954 if(msg_addr_virt != base_addr_virt){
955 iounmap(msg_addr_virt);
956 }
957 iounmap(base_addr_virt);
 958		pci_release_regions(pDev);
 959		return -ENOMEM;
960 }
961 memset(pHba, 0, sizeof(adpt_hba));
962
 963	mutex_lock(&adpt_configuration_lock);
 964
965 if(hba_chain != NULL){
966 for(p = hba_chain; p->next; p = p->next);
967 p->next = pHba;
968 } else {
969 hba_chain = pHba;
970 }
971 pHba->next = NULL;
972 pHba->unit = hba_count;
 973	sprintf(pHba->name, "dpti%d", hba_count);
 974	hba_count++;
975
 976	mutex_unlock(&adpt_configuration_lock);
 977
978 pHba->pDev = pDev;
979 pHba->base_addr_phys = base_addr0_phys;
980
981 // Set up the Virtual Base Address of the I2O Device
982 pHba->base_addr_virt = base_addr_virt;
983 pHba->msg_addr_virt = msg_addr_virt;
984 pHba->irq_mask = base_addr_virt+0x30;
985 pHba->post_port = base_addr_virt+0x40;
986 pHba->reply_port = base_addr_virt+0x44;
987
988 pHba->hrt = NULL;
989 pHba->lct = NULL;
990 pHba->lct_size = 0;
991 pHba->status_block = NULL;
992 pHba->post_count = 0;
993 pHba->state = DPTI_STATE_RESET;
994 pHba->pDev = pDev;
995 pHba->devices = NULL;
996
997 // Initializing the spinlocks
998 spin_lock_init(&pHba->state_lock);
999 spin_lock_init(&adpt_post_wait_lock);
1000
1001 if(raptorFlag == 0){
1002 printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n",
1003 hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
1004 } else {
1005 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
1006 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1007 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1008 }
1009
 1010	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
 1011		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1012 adpt_i2o_delete_hba(pHba);
1013 return -EINVAL;
1014 }
1015
1016 return 0;
1017}
1018
1019
1020static void adpt_i2o_delete_hba(adpt_hba* pHba)
1021{
1022 adpt_hba* p1;
1023 adpt_hba* p2;
1024 struct i2o_device* d;
1025 struct i2o_device* next;
1026 int i;
1027 int j;
1028 struct adpt_device* pDev;
1029 struct adpt_device* pNext;
1030
1031
 1032	mutex_lock(&adpt_configuration_lock);
 1033	if(pHba->host){
1034 free_irq(pHba->host->irq, pHba);
1035 }
 1036	p2 = NULL;
1037 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1038 if(p1 == pHba) {
1039 if(p2) {
1040 p2->next = p1->next;
1041 } else {
1042 hba_chain = p1->next;
1043 }
1044 break;
1045 }
1046 }
1047
1048 hba_count--;
 1049	mutex_unlock(&adpt_configuration_lock);
 1050
1051 iounmap(pHba->base_addr_virt);
 1052	pci_release_regions(pHba->pDev);
 1053	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1054 iounmap(pHba->msg_addr_virt);
1055 }
 1056	kfree(pHba->hrt);
1057 kfree(pHba->lct);
1058 kfree(pHba->status_block);
1059 kfree(pHba->reply_pool);
 1060
1061 for(d = pHba->devices; d ; d = next){
1062 next = d->next;
1063 kfree(d);
1064 }
1065 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1066 for(j = 0; j < MAX_ID; j++){
1067 if(pHba->channel[i].device[j] != NULL){
1068 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1069 pNext = pDev->next_lun;
1070 kfree(pDev);
1071 }
1072 }
1073 }
1074 }
 1075	pci_dev_put(pHba->pDev);
 1076	kfree(pHba);
1077
1078 if(hba_count <= 0){
1079 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1080 }
1081}
1082
1083
 1084static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1085{
1086 struct adpt_device* d;
1087
1088 if(chan < 0 || chan >= MAX_CHANNEL)
1089 return NULL;
1090
1091 if( pHba->channel[chan].device == NULL){
1092 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1093 return NULL;
1094 }
1095
1096 d = pHba->channel[chan].device[id];
1097 if(!d || d->tid == 0) {
1098 return NULL;
1099 }
1100
1101 /* If it is the only lun at that address then this should match*/
1102 if(d->scsi_lun == lun){
1103 return d;
1104 }
1105
1106 /* else we need to look through all the luns */
1107 for(d=d->next_lun ; d ; d = d->next_lun){
1108 if(d->scsi_lun == lun){
1109 return d;
1110 }
1111 }
1112 return NULL;
1113}
1114
1115
1116static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1117{
1118 // I used my own version of the WAIT_QUEUE_HEAD
1119 // to handle some version differences
1120 // When embedded in the kernel this could go back to the vanilla one
1121 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1122 int status = 0;
1123 ulong flags = 0;
1124 struct adpt_i2o_post_wait_data *p1, *p2;
1125 struct adpt_i2o_post_wait_data *wait_data =
1126 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
 1127	DECLARE_WAITQUEUE(wait, current);
 1128
 1129	if (!wait_data)
 1130		return -ENOMEM;
 1131
 1132	/*
1133 * The spin locking is needed to keep anyone from playing
1134 * with the queue pointers and id while we do the same
1135 */
1136 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1137 // TODO we need a MORE unique way of getting ids
1138 // to support async LCT get
1139 wait_data->next = adpt_post_wait_queue;
1140 adpt_post_wait_queue = wait_data;
1141 adpt_post_wait_id++;
1142 adpt_post_wait_id &= 0x7fff;
1143 wait_data->id = adpt_post_wait_id;
1144 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1145
1146 wait_data->wq = &adpt_wq_i2o_post;
1147 wait_data->status = -ETIMEDOUT;
1148
 1149	add_wait_queue(&adpt_wq_i2o_post, &wait);
 1150
1151 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1152 timeout *= HZ;
1153 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1154 set_current_state(TASK_INTERRUPTIBLE);
1155 if(pHba->host)
1156 spin_unlock_irq(pHba->host->host_lock);
1157 if (!timeout)
1158 schedule();
1159 else{
1160 timeout = schedule_timeout(timeout);
1161 if (timeout == 0) {
1162 // I/O issued, but cannot get result in
 1163				// specified time. Freeing resources is
1164 // dangerous.
1165 status = -ETIME;
1166 }
1167 }
1168 if(pHba->host)
1169 spin_lock_irq(pHba->host->host_lock);
1170 }
 1171	remove_wait_queue(&adpt_wq_i2o_post, &wait);
 1172
1173 if(status == -ETIMEDOUT){
1174 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1175 // We will have to free the wait_data memory during shutdown
1176 return status;
1177 }
1178
1179 /* Remove the entry from the queue. */
1180 p2 = NULL;
1181 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1182 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1183 if(p1 == wait_data) {
1184 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1185 status = -EOPNOTSUPP;
1186 }
1187 if(p2) {
1188 p2->next = p1->next;
1189 } else {
1190 adpt_post_wait_queue = p1->next;
1191 }
1192 break;
1193 }
1194 }
1195 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1196
1197 kfree(wait_data);
1198
1199 return status;
1200}
1201
1202
1203static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1204{
1205
1206 u32 m = EMPTY_QUEUE;
1207 u32 __iomem *msg;
1208 ulong timeout = jiffies + 30*HZ;
1209 do {
1210 rmb();
1211 m = readl(pHba->post_port);
1212 if (m != EMPTY_QUEUE) {
1213 break;
1214 }
1215 if(time_after(jiffies,timeout)){
1216 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1217 return -ETIMEDOUT;
1218 }
 1219		schedule_timeout_uninterruptible(1);
 1220	} while(m == EMPTY_QUEUE);
1221
1222 msg = pHba->msg_addr_virt + m;
1223 memcpy_toio(msg, data, len);
1224 wmb();
1225
1226 //post message
1227 writel(m, pHba->post_port);
1228 wmb();
1229
1230 return 0;
1231}
1232
1233
1234static void adpt_i2o_post_wait_complete(u32 context, int status)
1235{
1236 struct adpt_i2o_post_wait_data *p1 = NULL;
1237 /*
1238 * We need to search through the adpt_post_wait
1239 * queue to see if the given message is still
1240 * outstanding. If not, it means that the IOP
1241 * took longer to respond to the message than we
1242 * had allowed and timer has already expired.
1243 * Not much we can do about that except log
1244 * it for debug purposes, increase timeout, and recompile
1245 *
1246 * Lock needed to keep anyone from moving queue pointers
1247 * around while we're looking through them.
1248 */
1249
1250 context &= 0x7fff;
1251
1252 spin_lock(&adpt_post_wait_lock);
1253 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1254 if(p1->id == context) {
1255 p1->status = status;
1256 spin_unlock(&adpt_post_wait_lock);
1257 wake_up_interruptible(p1->wq);
1258 return;
1259 }
1260 }
1261 spin_unlock(&adpt_post_wait_lock);
1262 // If this happens we lose commands that probably really completed
1263 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1264 printk(KERN_DEBUG" Tasks in wait queue:\n");
1265 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1266 printk(KERN_DEBUG" %d\n",p1->id);
1267 }
1268 return;
1269}
1270
1271static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1272{
1273 u32 msg[8];
1274 u8* status;
1275 u32 m = EMPTY_QUEUE ;
1276 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1277
1278 if(pHba->initialized == FALSE) { // First time reset should be quick
1279 timeout = jiffies + (25*HZ);
1280 } else {
1281 adpt_i2o_quiesce_hba(pHba);
1282 }
1283
1284 do {
1285 rmb();
1286 m = readl(pHba->post_port);
1287 if (m != EMPTY_QUEUE) {
1288 break;
1289 }
1290 if(time_after(jiffies,timeout)){
1291 printk(KERN_WARNING"Timeout waiting for message!\n");
1292 return -ETIMEDOUT;
1293 }
 1294		schedule_timeout_uninterruptible(1);
 1295	} while (m == EMPTY_QUEUE);
1296
 1297	status = kzalloc(4, GFP_KERNEL|ADDR32);
 1298	if(status == NULL) {
1299 adpt_send_nop(pHba, m);
1300 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1301 return -ENOMEM;
1302 }
 1303
1304 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1305 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1306 msg[2]=0;
1307 msg[3]=0;
1308 msg[4]=0;
1309 msg[5]=0;
1310 msg[6]=virt_to_bus(status);
1311 msg[7]=0;
1312
1313 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1314 wmb();
1315 writel(m, pHba->post_port);
1316 wmb();
1317
1318 while(*status == 0){
1319 if(time_after(jiffies,timeout)){
1320 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1321 kfree(status);
1322 return -ETIMEDOUT;
1323 }
1324 rmb();
 1325		schedule_timeout_uninterruptible(1);
 1326	}
1327
1328 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1329 PDEBUG("%s: Reset in progress...\n", pHba->name);
1330 // Here we wait for message frame to become available
1331 // indicated that reset has finished
1332 do {
1333 rmb();
1334 m = readl(pHba->post_port);
1335 if (m != EMPTY_QUEUE) {
1336 break;
1337 }
1338 if(time_after(jiffies,timeout)){
1339 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1340 return -ETIMEDOUT;
1341 }
 1342			schedule_timeout_uninterruptible(1);
 1343		} while (m == EMPTY_QUEUE);
1344 // Flush the offset
1345 adpt_send_nop(pHba, m);
1346 }
1347 adpt_i2o_status_get(pHba);
1348 if(*status == 0x02 ||
1349 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1350 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1351 pHba->name);
1352 } else {
1353 PDEBUG("%s: Reset completed.\n", pHba->name);
1354 }
1355
1356 kfree(status);
1357#ifdef UARTDELAY
1358 // This delay is to allow someone attached to the card through the debug UART to
1359 // set up the dump levels that they want before the rest of the initialization sequence
1360 adpt_delay(20000);
1361#endif
1362 return 0;
1363}
1364
1365
1366static int adpt_i2o_parse_lct(adpt_hba* pHba)
1367{
1368 int i;
1369 int max;
1370 int tid;
1371 struct i2o_device *d;
1372 i2o_lct *lct = pHba->lct;
1373 u8 bus_no = 0;
1374 s16 scsi_id;
1375 s16 scsi_lun;
1376 u32 buf[10]; // larger than 7, or 8 ...
1377 struct adpt_device* pDev;
1378
1379 if (lct == NULL) {
1380 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1381 return -1;
1382 }
1383
1384 max = lct->table_size;
1385 max -= 3;
1386 max /= 9;
1387
1388 for(i=0;i<max;i++) {
1389 if( lct->lct_entry[i].user_tid != 0xfff){
1390 /*
1391 * If we have hidden devices, we need to inform the upper layers about
1392 * the possible maximum id reference to handle device access when
1393 * an array is disassembled. This code has no other purpose but to
1394 * allow us future access to devices that are currently hidden
1395 * behind arrays, hotspares or have not been configured (JBOD mode).
1396 */
1397 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1398 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1399 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1400 continue;
1401 }
1402 tid = lct->lct_entry[i].tid;
1403 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1404 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1405 continue;
1406 }
1407 bus_no = buf[0]>>16;
1408 scsi_id = buf[1];
1409 scsi_lun = (buf[2]>>8 )&0xff;
1410 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1411 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1412 continue;
1413 }
1414 if (scsi_id >= MAX_ID){
 1415			printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
1416 continue;
1417 }
1418 if(bus_no > pHba->top_scsi_channel){
1419 pHba->top_scsi_channel = bus_no;
1420 }
1421 if(scsi_id > pHba->top_scsi_id){
1422 pHba->top_scsi_id = scsi_id;
1423 }
1424 if(scsi_lun > pHba->top_scsi_lun){
1425 pHba->top_scsi_lun = scsi_lun;
1426 }
1427 continue;
1428 }
 1429		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
 1430		if(d==NULL)
1431 {
1432 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1433 return -ENOMEM;
1434 }
1435
 1436		d->controller = pHba;
 1437		d->next = NULL;
1438
1439 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1440
1441 d->flags = 0;
1442 tid = d->lct_data.tid;
1443 adpt_i2o_report_hba_unit(pHba, d);
1444 adpt_i2o_install_device(pHba, d);
1445 }
1446 bus_no = 0;
1447 for(d = pHba->devices; d ; d = d->next) {
1448 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1449 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1450 tid = d->lct_data.tid;
1451 // TODO get the bus_no from hrt-but for now they are in order
1452 //bus_no =
1453 if(bus_no > pHba->top_scsi_channel){
1454 pHba->top_scsi_channel = bus_no;
1455 }
1456 pHba->channel[bus_no].type = d->lct_data.class_id;
1457 pHba->channel[bus_no].tid = tid;
1458 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1459 {
1460 pHba->channel[bus_no].scsi_id = buf[1];
1461 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1462 }
1463 // TODO remove - this is just until we get from hrt
1464 bus_no++;
1465 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1466 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1467 break;
1468 }
1469 }
1470 }
1471
1472 // Setup adpt_device table
1473 for(d = pHba->devices; d ; d = d->next) {
1474 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1475 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1476 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1477
1478 tid = d->lct_data.tid;
1479 scsi_id = -1;
1480 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1481 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1482 bus_no = buf[0]>>16;
1483 scsi_id = buf[1];
1484 scsi_lun = (buf[2]>>8 )&0xff;
1485 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1486 continue;
1487 }
1488 if (scsi_id >= MAX_ID) {
1489 continue;
1490 }
1491 if( pHba->channel[bus_no].device[scsi_id] == NULL){
 1492				pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1493				if(pDev == NULL) {
1494 return -ENOMEM;
1495 }
1496 pHba->channel[bus_no].device[scsi_id] = pDev;
 1497			} else {
1498 for( pDev = pHba->channel[bus_no].device[scsi_id];
1499 pDev->next_lun; pDev = pDev->next_lun){
1500 }
 1501				pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
 1502				if(pDev->next_lun == NULL) {
1503 return -ENOMEM;
1504 }
 1505				pDev = pDev->next_lun;
1506 }
1507 pDev->tid = tid;
1508 pDev->scsi_channel = bus_no;
1509 pDev->scsi_id = scsi_id;
1510 pDev->scsi_lun = scsi_lun;
1511 pDev->pI2o_dev = d;
1512 d->owner = pDev;
1513 pDev->type = (buf[0])&0xff;
1514 pDev->flags = (buf[0]>>8)&0xff;
1515 if(scsi_id > pHba->top_scsi_id){
1516 pHba->top_scsi_id = scsi_id;
1517 }
1518 if(scsi_lun > pHba->top_scsi_lun){
1519 pHba->top_scsi_lun = scsi_lun;
1520 }
1521 }
1522 if(scsi_id == -1){
1523 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1524 d->lct_data.identity_tag);
1525 }
1526 }
1527 }
1528 return 0;
1529}
1530
1531
1532/*
1533 * Each I2O controller has a chain of devices on it - these match
1534 * the useful parts of the LCT of the board.
1535 */
1536
1537static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1538{
 1539	mutex_lock(&adpt_configuration_lock);
 1540	d->controller=pHba;
1541 d->owner=NULL;
1542 d->next=pHba->devices;
1543 d->prev=NULL;
1544 if (pHba->devices != NULL){
1545 pHba->devices->prev=d;
1546 }
1547 pHba->devices=d;
1548 *d->dev_name = 0;
1549
 1550	mutex_unlock(&adpt_configuration_lock);
 1551	return 0;
1552}
1553
1554static int adpt_open(struct inode *inode, struct file *file)
1555{
1556 int minor;
1557 adpt_hba* pHba;
1558
1559 //TODO check for root access
1560 //
1561 minor = iminor(inode);
1562 if (minor >= hba_count) {
1563 return -ENXIO;
1564 }
 1565	mutex_lock(&adpt_configuration_lock);
 1566	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1567 if (pHba->unit == minor) {
1568 break; /* found adapter */
1569 }
1570 }
1571 if (pHba == NULL) {
 1572		mutex_unlock(&adpt_configuration_lock);
 1573		return -ENXIO;
1574 }
1575
1576// if(pHba->in_use){
 1577	//	mutex_unlock(&adpt_configuration_lock);
 1578//		return -EBUSY;
1579// }
1580
1581 pHba->in_use = 1;
 1582	mutex_unlock(&adpt_configuration_lock);
 1583
1584 return 0;
1585}
1586
1587static int adpt_close(struct inode *inode, struct file *file)
1588{
1589 int minor;
1590 adpt_hba* pHba;
1591
1592 minor = iminor(inode);
1593 if (minor >= hba_count) {
1594 return -ENXIO;
1595 }
 1596	mutex_lock(&adpt_configuration_lock);
 1597	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1598 if (pHba->unit == minor) {
1599 break; /* found adapter */
1600 }
1601 }
 1602	mutex_unlock(&adpt_configuration_lock);
 1603	if (pHba == NULL) {
1604 return -ENXIO;
1605 }
1606
1607 pHba->in_use = 0;
1608
1609 return 0;
1610}
1611
1612
1613static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1614{
1615 u32 msg[MAX_MESSAGE_SIZE];
1616 u32* reply = NULL;
1617 u32 size = 0;
1618 u32 reply_size = 0;
1619 u32 __user *user_msg = arg;
1620 u32 __user * user_reply = NULL;
1621 void *sg_list[pHba->sg_tablesize];
1622 u32 sg_offset = 0;
1623 u32 sg_count = 0;
1624 int sg_index = 0;
1625 u32 i = 0;
1626 u32 rcode = 0;
1627 void *p = NULL;
1628 ulong flags = 0;
1629
1630 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1631 // get user msg size in u32s
1632 if(get_user(size, &user_msg[0])){
1633 return -EFAULT;
1634 }
1635 size = size>>16;
1636
1637 user_reply = &user_msg[size];
1638 if(size > MAX_MESSAGE_SIZE){
1639 return -EFAULT;
1640 }
1641 size *= 4; // Convert to bytes
1642
1643 /* Copy in the user's I2O command */
1644 if(copy_from_user(msg, user_msg, size)) {
1645 return -EFAULT;
1646 }
1647 get_user(reply_size, &user_reply[0]);
1648 reply_size = reply_size>>16;
1649 if(reply_size > REPLY_FRAME_SIZE){
1650 reply_size = REPLY_FRAME_SIZE;
1651 }
1652 reply_size *= 4;
 1653	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
 1654	if(reply == NULL) {
1655 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1656 return -ENOMEM;
1657 }
 1658	sg_offset = (msg[0]>>4)&0xf;
1659 msg[2] = 0x40000000; // IOCTL context
1660 msg[3] = (u32)reply;
1661 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1662 if(sg_offset) {
1663 // TODO 64bit fix
1664 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1665 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1666 if (sg_count > pHba->sg_tablesize){
1667 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1668 kfree (reply);
1669 return -EINVAL;
1670 }
1671
1672 for(i = 0; i < sg_count; i++) {
1673 int sg_size;
1674
1675 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1676 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1677 rcode = -EINVAL;
1678 goto cleanup;
1679 }
1680 sg_size = sg[i].flag_count & 0xffffff;
1681 /* Allocate memory for the transfer */
1682 p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
1683 if(!p) {
1684 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1685 pHba->name,sg_size,i,sg_count);
1686 rcode = -ENOMEM;
1687 goto cleanup;
1688 }
1689 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1690 /* Copy in the user's SG buffer if necessary */
1691 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1692 // TODO 64bit fix
1693 if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
1694 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1695 rcode = -EFAULT;
1696 goto cleanup;
1697 }
1698 }
1699 //TODO 64bit fix
1700 sg[i].addr_bus = (u32)virt_to_bus(p);
1701 }
1702 }
1703
1704 do {
1705 if(pHba->host)
1706 spin_lock_irqsave(pHba->host->host_lock, flags);
 1707		// This state stops any new commands from entering the
1708 // controller while processing the ioctl
1709// pHba->state |= DPTI_STATE_IOCTL;
1710// We can't set this now - The scsi subsystem sets host_blocked and
1711// the queue empties and stops. We need a way to restart the queue
1712 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1713 if (rcode != 0)
1714 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1715 rcode, reply);
1716// pHba->state &= ~DPTI_STATE_IOCTL;
1717 if(pHba->host)
1718 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1719 } while(rcode == -ETIMEDOUT);
1720
1721 if(rcode){
1722 goto cleanup;
1723 }
1724
1725 if(sg_offset) {
1726 /* Copy back the Scatter Gather buffers back to user space */
1727 u32 j;
1728 // TODO 64bit fix
1729 struct sg_simple_element* sg;
1730 int sg_size;
1731
1732 // re-acquire the original message to handle correctly the sg copy operation
1733 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1734 // get user msg size in u32s
1735 if(get_user(size, &user_msg[0])){
1736 rcode = -EFAULT;
1737 goto cleanup;
1738 }
1739 size = size>>16;
1740 size *= 4;
1741 /* Copy in the user's I2O command */
1742 if (copy_from_user (msg, user_msg, size)) {
1743 rcode = -EFAULT;
1744 goto cleanup;
1745 }
1746 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1747
1748 // TODO 64bit fix
1749 sg = (struct sg_simple_element*)(msg + sg_offset);
1750 for (j = 0; j < sg_count; j++) {
1751 /* Copy out the SG list to user's buffer if necessary */
1752 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1753 sg_size = sg[j].flag_count & 0xffffff;
1754 // TODO 64bit fix
1755 if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
1756 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1757 rcode = -EFAULT;
1758 goto cleanup;
1759 }
1760 }
1761 }
1762 }
1763
1764 /* Copy back the reply to user space */
1765 if (reply_size) {
1766 // we wrote our own values for context - now restore the user supplied ones
1767 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1768 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1769 rcode = -EFAULT;
1770 }
1771 if(copy_to_user(user_reply, reply, reply_size)) {
1772 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1773 rcode = -EFAULT;
1774 }
1775 }
1776
1777
1778cleanup:
1779 if (rcode != -ETIME && rcode != -EINTR)
1780 kfree (reply);
1781 while(sg_index) {
1782 if(sg_list[--sg_index]) {
1783 if (rcode != -ETIME && rcode != -EINTR)
1784 kfree(sg_list[sg_index]);
1785 }
1786 }
1787 return rcode;
1788}
1789
1790
1791/*
 1792 * This routine returns information about the system. This does not affect
 1793 * any logic and, if the info is wrong, it doesn't matter.
1794 */
1795
1796/* Get all the info we can not get from kernel services */
1797static int adpt_system_info(void __user *buffer)
1798{
1799 sysInfo_S si;
1800
1801 memset(&si, 0, sizeof(si));
1802
1803 si.osType = OS_LINUX;
Adrian Bunka4cd16e2005-06-25 14:59:01 -07001804 si.osMajorVersion = 0;
1805 si.osMinorVersion = 0;
1806 si.osRevision = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 si.busType = SI_PCI_BUS;
1808 si.processorFamily = DPTI_sig.dsProcessorFamily;
1809
1810#if defined __i386__
1811 adpt_i386_info(&si);
1812#elif defined (__ia64__)
1813 adpt_ia64_info(&si);
1814#elif defined(__sparc__)
1815 adpt_sparc_info(&si);
1816#elif defined (__alpha__)
1817 adpt_alpha_info(&si);
1818#else
1819 si.processorType = 0xff ;
1820#endif
1821 if(copy_to_user(buffer, &si, sizeof(si))){
1822 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1823 return -EFAULT;
1824 }
1825
1826 return 0;
1827}
1828
1829#if defined __ia64__
1830static void adpt_ia64_info(sysInfo_S* si)
1831{
1832 // This is all the info we need for now
1833 // We will add more info as our new
 1834 // management utility requires it
1835 si->processorType = PROC_IA64;
1836}
1837#endif
1838
1839
1840#if defined __sparc__
1841static void adpt_sparc_info(sysInfo_S* si)
1842{
1843 // This is all the info we need for now
1844 // We will add more info as our new
 1845 // management utility requires it
1846 si->processorType = PROC_ULTRASPARC;
1847}
1848#endif
1849
1850#if defined __alpha__
1851static void adpt_alpha_info(sysInfo_S* si)
1852{
1853 // This is all the info we need for now
1854 // We will add more info as our new
 1855 // management utility requires it
1856 si->processorType = PROC_ALPHA;
1857}
1858#endif
1859
1860#if defined __i386__
1861
1862static void adpt_i386_info(sysInfo_S* si)
1863{
1864 // This is all the info we need for now
1865 // We will add more info as our new
 1866 // management utility requires it
1867 switch (boot_cpu_data.x86) {
1868 case CPU_386:
1869 si->processorType = PROC_386;
1870 break;
1871 case CPU_486:
1872 si->processorType = PROC_486;
1873 break;
1874 case CPU_586:
1875 si->processorType = PROC_PENTIUM;
1876 break;
1877 default: // Just in case
1878 si->processorType = PROC_PENTIUM;
1879 break;
1880 }
1881}
1882
1883#endif
1884
1885
1886static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
1887 ulong arg)
1888{
1889 int minor;
1890 int error = 0;
1891 adpt_hba* pHba;
1892 ulong flags = 0;
1893 void __user *argp = (void __user *)arg;
1894
1895 minor = iminor(inode);
1896 if (minor >= DPTI_MAX_HBA){
1897 return -ENXIO;
1898 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001899 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1901 if (pHba->unit == minor) {
1902 break; /* found adapter */
1903 }
1904 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001905 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 if(pHba == NULL){
1907 return -ENXIO;
1908 }
1909
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001910 while((volatile u32) pHba->state & DPTI_STATE_RESET )
1911 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913 switch (cmd) {
1914 // TODO: handle 3 cases
1915 case DPT_SIGNATURE:
1916 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1917 return -EFAULT;
1918 }
1919 break;
1920 case I2OUSRCMD:
1921 return adpt_i2o_passthru(pHba, argp);
1922
1923 case DPT_CTRLINFO:{
1924 drvrHBAinfo_S HbaInfo;
1925
1926#define FLG_OSD_PCI_VALID 0x0001
1927#define FLG_OSD_DMA 0x0002
1928#define FLG_OSD_I2O 0x0004
1929 memset(&HbaInfo, 0, sizeof(HbaInfo));
1930 HbaInfo.drvrHBAnum = pHba->unit;
1931 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1932 HbaInfo.blinkState = adpt_read_blink_led(pHba);
1933 HbaInfo.pciBusNum = pHba->pDev->bus->number;
1934 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1935 HbaInfo.Interrupt = pHba->pDev->irq;
1936 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1937 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1938 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1939 return -EFAULT;
1940 }
1941 break;
1942 }
1943 case DPT_SYSINFO:
1944 return adpt_system_info(argp);
1945 case DPT_BLINKLED:{
1946 u32 value;
1947 value = (u32)adpt_read_blink_led(pHba);
1948 if (copy_to_user(argp, &value, sizeof(value))) {
1949 return -EFAULT;
1950 }
1951 break;
1952 }
1953 case I2ORESETCMD:
1954 if(pHba->host)
1955 spin_lock_irqsave(pHba->host->host_lock, flags);
1956 adpt_hba_reset(pHba);
1957 if(pHba->host)
1958 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1959 break;
1960 case I2ORESCANCMD:
1961 adpt_rescan(pHba);
1962 break;
1963 default:
1964 return -EINVAL;
1965 }
1966
1967 return error;
1968}
1969
1970
David Howells7d12e782006-10-05 14:55:46 +01001971static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972{
1973 struct scsi_cmnd* cmd;
1974 adpt_hba* pHba = dev_id;
1975 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001976 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 u32 status=0;
1978 u32 context;
1979 ulong flags = 0;
1980 int handled = 0;
1981
1982 if (pHba == NULL){
1983 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
1984 return IRQ_NONE;
1985 }
1986 if(pHba->host)
1987 spin_lock_irqsave(pHba->host->host_lock, flags);
1988
1989 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
1990 m = readl(pHba->reply_port);
1991 if(m == EMPTY_QUEUE){
1992 // Try twice then give up
1993 rmb();
1994 m = readl(pHba->reply_port);
1995 if(m == EMPTY_QUEUE){
1996 // This really should not happen
1997 printk(KERN_ERR"dpti: Could not get reply frame\n");
1998 goto out;
1999 }
2000 }
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002001 reply = bus_to_virt(m);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
2003 if (readl(reply) & MSG_FAIL) {
2004 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002005 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 u32 old_context;
2007 PDEBUG("%s: Failed message\n",pHba->name);
2008 if(old_m >= 0x100000){
2009 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2010 writel(m,pHba->reply_port);
2011 continue;
2012 }
2013 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002014 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 old_context = readl(msg+12);
2016 writel(old_context, reply+12);
2017 adpt_send_nop(pHba, old_m);
2018 }
2019 context = readl(reply+8);
2020 if(context & 0x40000000){ // IOCTL
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002021 void *p = (void *)readl(reply+12);
2022 if( p != NULL) {
2023 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 }
2025 // All IOCTLs will also be post wait
2026 }
2027 if(context & 0x80000000){ // Post wait message
2028 status = readl(reply+16);
2029 if(status >> 24){
2030 status &= 0xffff; /* Get detail status */
2031 } else {
2032 status = I2O_POST_WAIT_OK;
2033 }
2034 if(!(context & 0x40000000)) {
2035 cmd = (struct scsi_cmnd*) readl(reply+12);
2036 if(cmd != NULL) {
2037 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2038 }
2039 }
2040 adpt_i2o_post_wait_complete(context, status);
2041 } else { // SCSI message
2042 cmd = (struct scsi_cmnd*) readl(reply+12);
2043 if(cmd != NULL){
2044 if(cmd->serial_number != 0) { // If not timedout
2045 adpt_i2o_to_scsi(reply, cmd);
2046 }
2047 }
2048 }
2049 writel(m, pHba->reply_port);
2050 wmb();
2051 rmb();
2052 }
2053 handled = 1;
2054out: if(pHba->host)
2055 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2056 return IRQ_RETVAL(handled);
2057}
2058
2059static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2060{
2061 int i;
2062 u32 msg[MAX_MESSAGE_SIZE];
2063 u32* mptr;
2064 u32 *lenptr;
2065 int direction;
2066 int scsidir;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002067 int nseg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 u32 len;
2069 u32 reqlen;
2070 s32 rcode;
2071
2072 memset(msg, 0 , sizeof(msg));
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002073 len = scsi_bufflen(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 direction = 0x00000000;
2075
2076 scsidir = 0x00000000; // DATA NO XFER
2077 if(len) {
2078 /*
2079 * Set SCBFlags to indicate if data is being transferred
2080 * in or out, or no data transfer
 2081 		 * Note: we do not have to check for a negative index since
 2082 		 * cmd->cmnd[0] is an unsigned char
2083 */
2084 switch(cmd->sc_data_direction){
2085 case DMA_FROM_DEVICE:
2086 scsidir =0x40000000; // DATA IN (iop<--dev)
2087 break;
2088 case DMA_TO_DEVICE:
2089 direction=0x04000000; // SGL OUT
2090 scsidir =0x80000000; // DATA OUT (iop-->dev)
2091 break;
2092 case DMA_NONE:
2093 break;
2094 case DMA_BIDIRECTIONAL:
2095 scsidir =0x40000000; // DATA IN (iop<--dev)
2096 // Assume In - and continue;
2097 break;
2098 default:
2099 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2100 pHba->name, cmd->cmnd[0]);
2101 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2102 cmd->scsi_done(cmd);
2103 return 0;
2104 }
2105 }
2106 // msg[0] is set later
2107 // I2O_CMD_SCSI_EXEC
2108 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2109 msg[2] = 0;
2110 msg[3] = (u32)cmd; /* We want the SCSI control block back */
2111 // Our cards use the transaction context as the tag for queueing
2112 // Adaptec/DPT Private stuff
2113 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2114 msg[5] = d->tid;
2115 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2116 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2117 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2118 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2119 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2120
2121 mptr=msg+7;
2122
2123 // Write SCSI command into the message - always 16 byte block
2124 memset(mptr, 0, 16);
2125 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2126 mptr+=4;
2127 lenptr=mptr++; /* Remember me - fill in when we know */
2128 reqlen = 14; // SINGLE SGE
2129 /* Now fill in the SGList and command */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002131 nseg = scsi_dma_map(cmd);
2132 BUG_ON(nseg < 0);
2133 if (nseg) {
2134 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
2136 len = 0;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002137 scsi_for_each_sg(cmd, sg, nseg, i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2139 len+=sg_dma_len(sg);
2140 *mptr++ = sg_dma_address(sg);
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002141 /* Make this an end of list */
2142 if (i == nseg - 1)
2143 mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 reqlen = mptr - msg;
2146 *lenptr = len;
2147
2148 if(cmd->underflow && len != cmd->underflow){
2149 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2150 len, cmd->underflow);
2151 }
2152 } else {
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002153 *lenptr = len = 0;
2154 reqlen = 12;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
2157 /* Stick the headers on */
2158 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2159
 2160 	// Send it on its way
2161 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2162 if (rcode == 0) {
2163 return 0;
2164 }
2165 return rcode;
2166}
2167
2168
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002169static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170{
2171 adpt_hba* pHba;
2172 u32 hba_status;
2173 u32 dev_status;
2174 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
 2175 	// I know this would look cleaner if I just read bytes,
 2176 	// but the model I have been using for all the rest of the
 2177 	// I/O is in 4-byte words - so I keep that model
2178 u16 detailed_status = readl(reply+16) &0xffff;
2179 dev_status = (detailed_status & 0xff);
2180 hba_status = detailed_status >> 8;
2181
2182 // calculate resid for sg
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002183 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
2185 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2186
2187 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2188
2189 if(!(reply_flags & MSG_FAIL)) {
2190 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2191 case I2O_SCSI_DSC_SUCCESS:
2192 cmd->result = (DID_OK << 16);
2193 // handle underflow
2194 if(readl(reply+5) < cmd->underflow ) {
2195 cmd->result = (DID_ERROR <<16);
2196 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2197 }
2198 break;
2199 case I2O_SCSI_DSC_REQUEST_ABORTED:
2200 cmd->result = (DID_ABORT << 16);
2201 break;
2202 case I2O_SCSI_DSC_PATH_INVALID:
2203 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2204 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2205 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2206 case I2O_SCSI_DSC_NO_ADAPTER:
2207 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2208 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2209 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2210 cmd->result = (DID_TIME_OUT << 16);
2211 break;
2212 case I2O_SCSI_DSC_ADAPTER_BUSY:
2213 case I2O_SCSI_DSC_BUS_BUSY:
2214 cmd->result = (DID_BUS_BUSY << 16);
2215 break;
2216 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2217 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2218 cmd->result = (DID_RESET << 16);
2219 break;
2220 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2221 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2222 cmd->result = (DID_PARITY << 16);
2223 break;
2224 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2225 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2226 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2227 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2228 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2229 case I2O_SCSI_DSC_DATA_OVERRUN:
2230 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2231 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2232 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2233 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2234 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2235 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2236 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2237 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2238 case I2O_SCSI_DSC_INVALID_CDB:
2239 case I2O_SCSI_DSC_LUN_INVALID:
2240 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2241 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2242 case I2O_SCSI_DSC_NO_NEXUS:
2243 case I2O_SCSI_DSC_CDB_RECEIVED:
2244 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2245 case I2O_SCSI_DSC_QUEUE_FROZEN:
2246 case I2O_SCSI_DSC_REQUEST_INVALID:
2247 default:
2248 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2249 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2250 hba_status, dev_status, cmd->cmnd[0]);
2251 cmd->result = (DID_ERROR << 16);
2252 break;
2253 }
2254
2255 // copy over the request sense data if it was a check
2256 // condition status
2257 if(dev_status == 0x02 /*CHECK_CONDITION*/) {
2258 u32 len = sizeof(cmd->sense_buffer);
2259 len = (len > 40) ? 40 : len;
2260 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002261 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2263 cmd->sense_buffer[2] == DATA_PROTECT ){
2264 /* This is to handle an array failed */
2265 cmd->result = (DID_TIME_OUT << 16);
2266 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2267 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2268 hba_status, dev_status, cmd->cmnd[0]);
2269
2270 }
2271 }
2272 } else {
 2273 		/* In this condition we could not talk to the tid -
 2274 		 * the card rejected it. We should signal a retry
 2275 		 * for a limited number of retries.
2276 */
2277 cmd->result = (DID_TIME_OUT << 16);
2278 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2279 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2280 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2281 }
2282
2283 cmd->result |= (dev_status);
2284
2285 if(cmd->scsi_done != NULL){
2286 cmd->scsi_done(cmd);
2287 }
2288 return cmd->result;
2289}
2290
2291
2292static s32 adpt_rescan(adpt_hba* pHba)
2293{
2294 s32 rcode;
2295 ulong flags = 0;
2296
2297 if(pHba->host)
2298 spin_lock_irqsave(pHba->host->host_lock, flags);
2299 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2300 goto out;
2301 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2302 goto out;
2303 rcode = 0;
2304out: if(pHba->host)
2305 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2306 return rcode;
2307}
2308
2309
2310static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2311{
2312 int i;
2313 int max;
2314 int tid;
2315 struct i2o_device *d;
2316 i2o_lct *lct = pHba->lct;
2317 u8 bus_no = 0;
2318 s16 scsi_id;
2319 s16 scsi_lun;
2320 u32 buf[10]; // at least 8 u32's
2321 struct adpt_device* pDev = NULL;
2322 struct i2o_device* pI2o_dev = NULL;
2323
2324 if (lct == NULL) {
2325 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2326 return -1;
2327 }
2328
2329 max = lct->table_size;
2330 max -= 3;
2331 max /= 9;
2332
2333 // Mark each drive as unscanned
2334 for (d = pHba->devices; d; d = d->next) {
2335 pDev =(struct adpt_device*) d->owner;
2336 if(!pDev){
2337 continue;
2338 }
2339 pDev->state |= DPTI_DEV_UNSCANNED;
2340 }
2341
2342 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2343
2344 for(i=0;i<max;i++) {
2345 if( lct->lct_entry[i].user_tid != 0xfff){
2346 continue;
2347 }
2348
2349 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2350 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2351 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2352 tid = lct->lct_entry[i].tid;
2353 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2354 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2355 continue;
2356 }
2357 bus_no = buf[0]>>16;
2358 scsi_id = buf[1];
2359 scsi_lun = (buf[2]>>8 )&0xff;
2360 pDev = pHba->channel[bus_no].device[scsi_id];
 2361 			/* find the matching LUN */
2362 while(pDev) {
2363 if(pDev->scsi_lun == scsi_lun) {
2364 break;
2365 }
2366 pDev = pDev->next_lun;
2367 }
2368 if(!pDev ) { // Something new add it
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002369 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 if(d==NULL)
2371 {
2372 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2373 return -ENOMEM;
2374 }
2375
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002376 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 d->next = NULL;
2378
2379 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2380
2381 d->flags = 0;
2382 adpt_i2o_report_hba_unit(pHba, d);
2383 adpt_i2o_install_device(pHba, d);
2384
2385 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2386 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2387 continue;
2388 }
2389 pDev = pHba->channel[bus_no].device[scsi_id];
2390 if( pDev == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302391 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 if(pDev == NULL) {
2393 return -ENOMEM;
2394 }
2395 pHba->channel[bus_no].device[scsi_id] = pDev;
2396 } else {
2397 while (pDev->next_lun) {
2398 pDev = pDev->next_lun;
2399 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302400 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 if(pDev == NULL) {
2402 return -ENOMEM;
2403 }
2404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 pDev->tid = d->lct_data.tid;
2406 pDev->scsi_channel = bus_no;
2407 pDev->scsi_id = scsi_id;
2408 pDev->scsi_lun = scsi_lun;
2409 pDev->pI2o_dev = d;
2410 d->owner = pDev;
2411 pDev->type = (buf[0])&0xff;
2412 pDev->flags = (buf[0]>>8)&0xff;
 2413 				// Too late, the SCSI system has made up its mind, but what the hey ...
2414 if(scsi_id > pHba->top_scsi_id){
2415 pHba->top_scsi_id = scsi_id;
2416 }
2417 if(scsi_lun > pHba->top_scsi_lun){
2418 pHba->top_scsi_lun = scsi_lun;
2419 }
2420 continue;
2421 } // end of new i2o device
2422
2423 // We found an old device - check it
2424 while(pDev) {
2425 if(pDev->scsi_lun == scsi_lun) {
2426 if(!scsi_device_online(pDev->pScsi_dev)) {
2427 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2428 pHba->name,bus_no,scsi_id,scsi_lun);
2429 if (pDev->pScsi_dev) {
2430 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2431 }
2432 }
2433 d = pDev->pI2o_dev;
2434 if(d->lct_data.tid != tid) { // something changed
2435 pDev->tid = tid;
2436 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2437 if (pDev->pScsi_dev) {
2438 pDev->pScsi_dev->changed = TRUE;
2439 pDev->pScsi_dev->removable = TRUE;
2440 }
2441 }
2442 // Found it - mark it scanned
2443 pDev->state = DPTI_DEV_ONLINE;
2444 break;
2445 }
2446 pDev = pDev->next_lun;
2447 }
2448 }
2449 }
2450 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2451 pDev =(struct adpt_device*) pI2o_dev->owner;
2452 if(!pDev){
2453 continue;
2454 }
 2455 		// Take offline any drives that previously existed but could not be found
 2456 		// in the LCT table
2457 if (pDev->state & DPTI_DEV_UNSCANNED){
2458 pDev->state = DPTI_DEV_OFFLINE;
2459 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2460 if (pDev->pScsi_dev) {
2461 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2462 }
2463 }
2464 }
2465 return 0;
2466}
2467
2468static void adpt_fail_posted_scbs(adpt_hba* pHba)
2469{
2470 struct scsi_cmnd* cmd = NULL;
2471 struct scsi_device* d = NULL;
2472
2473 shost_for_each_device(d, pHba->host) {
2474 unsigned long flags;
2475 spin_lock_irqsave(&d->list_lock, flags);
2476 list_for_each_entry(cmd, &d->cmd_list, list) {
2477 if(cmd->serial_number == 0){
2478 continue;
2479 }
2480 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2481 cmd->scsi_done(cmd);
2482 }
2483 spin_unlock_irqrestore(&d->list_lock, flags);
2484 }
2485}
2486
2487
2488/*============================================================================
2489 * Routines from i2o subsystem
2490 *============================================================================
2491 */
2492
2493
2494
2495/*
2496 * Bring an I2O controller into HOLD state. See the spec.
2497 */
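/*
 * Roughly, the bring-up sequence implemented below is: reset the IOP if
 * necessary and initialize the outbound (reply) queue to reach HOLD, then
 * fetch the HRT; adpt_i2o_online_hba() afterwards sends the system table
 * (READY) and enables the IOP (OPERATIONAL).
 */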
2498static int adpt_i2o_activate_hba(adpt_hba* pHba)
2499{
2500 int rcode;
2501
2502 if(pHba->initialized ) {
2503 if (adpt_i2o_status_get(pHba) < 0) {
2504 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2505 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2506 return rcode;
2507 }
2508 if (adpt_i2o_status_get(pHba) < 0) {
2509 printk(KERN_INFO "HBA not responding.\n");
2510 return -1;
2511 }
2512 }
2513
2514 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2515 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2516 return -1;
2517 }
2518
2519 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2520 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2521 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2522 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2523 adpt_i2o_reset_hba(pHba);
2524 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2525 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2526 return -1;
2527 }
2528 }
2529 } else {
2530 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2531 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2532 return rcode;
2533 }
2534
2535 }
2536
2537 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2538 return -1;
2539 }
2540
2541 /* In HOLD state */
2542
2543 if (adpt_i2o_hrt_get(pHba) < 0) {
2544 return -1;
2545 }
2546
2547 return 0;
2548}
2549
2550/*
2551 * Bring a controller online into OPERATIONAL state.
2552 */
2553
2554static int adpt_i2o_online_hba(adpt_hba* pHba)
2555{
2556 if (adpt_i2o_systab_send(pHba) < 0) {
2557 adpt_i2o_delete_hba(pHba);
2558 return -1;
2559 }
2560 /* In READY state */
2561
2562 if (adpt_i2o_enable_hba(pHba) < 0) {
2563 adpt_i2o_delete_hba(pHba);
2564 return -1;
2565 }
2566
2567 /* In OPERATIONAL state */
2568 return 0;
2569}
2570
2571static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2572{
2573 u32 __iomem *msg;
2574 ulong timeout = jiffies + 5*HZ;
2575
2576 while(m == EMPTY_QUEUE){
2577 rmb();
2578 m = readl(pHba->post_port);
2579 if(m != EMPTY_QUEUE){
2580 break;
2581 }
2582 if(time_after(jiffies,timeout)){
2583 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2584 return 2;
2585 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002586 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 }
2588 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2589 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2590 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2591 writel( 0,&msg[2]);
2592 wmb();
2593
2594 writel(m, pHba->post_port);
2595 wmb();
2596 return 0;
2597}
2598
2599static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2600{
2601 u8 *status;
2602 u32 __iomem *msg = NULL;
2603 int i;
2604 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2605 u32* ptr;
2606 u32 outbound_frame; // This had to be a 32 bit address
2607 u32 m;
2608
2609 do {
2610 rmb();
2611 m = readl(pHba->post_port);
2612 if (m != EMPTY_QUEUE) {
2613 break;
2614 }
2615
2616 if(time_after(jiffies,timeout)){
2617 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2618 return -ETIMEDOUT;
2619 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002620 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 } while(m == EMPTY_QUEUE);
2622
2623 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2624
2625 status = kmalloc(4,GFP_KERNEL|ADDR32);
2626 if (status==NULL) {
2627 adpt_send_nop(pHba, m);
2628 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2629 pHba->name);
2630 return -ENOMEM;
2631 }
2632 memset(status, 0, 4);
2633
2634 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2635 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2636 writel(0, &msg[2]);
2637 writel(0x0106, &msg[3]); /* Transaction context */
2638 writel(4096, &msg[4]); /* Host page frame size */
2639 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2640 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2641 writel(virt_to_bus(status), &msg[7]);
2642
2643 writel(m, pHba->post_port);
2644 wmb();
2645
2646 // Wait for the reply status to come back
2647 do {
2648 if (*status) {
2649 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2650 break;
2651 }
2652 }
2653 rmb();
2654 if(time_after(jiffies,timeout)){
2655 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2656 return -ETIMEDOUT;
2657 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002658 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 } while (1);
2660
2661 // If the command was successful, fill the fifo with our reply
2662 // message packets
2663 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002664 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 return -2;
2666 }
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002667 kfree(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002669 kfree(pHba->reply_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002671 pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 if(!pHba->reply_pool){
2673 printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
2674 return -1;
2675 }
2676 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2677
2678 ptr = pHba->reply_pool;
2679 for(i = 0; i < pHba->reply_fifo_size; i++) {
2680 outbound_frame = (u32)virt_to_bus(ptr);
2681 writel(outbound_frame, pHba->reply_port);
2682 wmb();
2683 ptr += REPLY_FRAME_SIZE;
2684 }
2685 adpt_i2o_status_get(pHba);
2686 return 0;
2687}
2688
2689
2690/*
2691 * I2O System Table. Contains information about
2692 * all the IOPs in the system. Used to inform IOPs
2693 * about each other's existence.
2694 *
2695 * sys_tbl_ver is the CurrentChangeIndicator that is
2696 * used by IOPs to track changes.
2697 */
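/*
 * For reference, the table built by adpt_i2o_build_sys_table() below is a
 * short header (num_entries, version, change_ind) followed by one
 * i2o_sys_tbl_entry per registered HBA (org_id, iop_id, iop_state,
 * frame_size, inbound message port address and so on).
 */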
2698
2699
2700
2701static s32 adpt_i2o_status_get(adpt_hba* pHba)
2702{
2703 ulong timeout;
2704 u32 m;
2705 u32 __iomem *msg;
2706 u8 *status_block=NULL;
2707 ulong status_block_bus;
2708
2709 if(pHba->status_block == NULL) {
2710 pHba->status_block = (i2o_status_block*)
2711 kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
2712 if(pHba->status_block == NULL) {
2713 printk(KERN_ERR
2714 "dpti%d: Get Status Block failed; Out of memory. \n",
2715 pHba->unit);
2716 return -ENOMEM;
2717 }
2718 }
2719 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2720 status_block = (u8*)(pHba->status_block);
2721 status_block_bus = virt_to_bus(pHba->status_block);
2722 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2723 do {
2724 rmb();
2725 m = readl(pHba->post_port);
2726 if (m != EMPTY_QUEUE) {
2727 break;
2728 }
2729 if(time_after(jiffies,timeout)){
2730 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2731 pHba->name);
2732 return -ETIMEDOUT;
2733 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002734 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 } while(m==EMPTY_QUEUE);
2736
2737
2738 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2739
2740 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2741 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2742 writel(1, &msg[2]);
2743 writel(0, &msg[3]);
2744 writel(0, &msg[4]);
2745 writel(0, &msg[5]);
2746 writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
2747 writel(0, &msg[7]);
2748 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2749
2750 //post message
2751 writel(m, pHba->post_port);
2752 wmb();
2753
2754 while(status_block[87]!=0xff){
2755 if(time_after(jiffies,timeout)){
2756 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2757 pHba->unit);
2758 return -ETIMEDOUT;
2759 }
2760 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002761 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 }
2763
2764 // Set up our number of outbound and inbound messages
2765 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2766 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2767 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2768 }
2769
2770 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2771 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2772 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2773 }
2774
2775 // Calculate the Scatter Gather list size
2776 pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
2777 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2778 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2779 }
2780
2781
2782#ifdef DEBUG
2783 printk("dpti%d: State = ",pHba->unit);
2784 switch(pHba->status_block->iop_state) {
2785 case 0x01:
2786 printk("INIT\n");
2787 break;
2788 case 0x02:
2789 printk("RESET\n");
2790 break;
2791 case 0x04:
2792 printk("HOLD\n");
2793 break;
2794 case 0x05:
2795 printk("READY\n");
2796 break;
2797 case 0x08:
2798 printk("OPERATIONAL\n");
2799 break;
2800 case 0x10:
2801 printk("FAILED\n");
2802 break;
2803 case 0x11:
2804 printk("FAULTED\n");
2805 break;
2806 default:
2807 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2808 }
2809#endif
2810 return 0;
2811}
2812
2813/*
2814 * Get the IOP's Logical Configuration Table
2815 */
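/*
 * The LCT is fetched with I2O_CMD_LCT_NOTIFY into a buffer sized from the
 * status block; if the IOP reports a table_size (in 32-bit words) larger
 * than the buffer, the buffer is reallocated and the request is retried.
 */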
2816static int adpt_i2o_lct_get(adpt_hba* pHba)
2817{
2818 u32 msg[8];
2819 int ret;
2820 u32 buf[16];
2821
2822 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2823 pHba->lct_size = pHba->status_block->expected_lct_size;
2824 }
2825 do {
2826 if (pHba->lct == NULL) {
2827 pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2828 if(pHba->lct == NULL) {
2829 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2830 pHba->name);
2831 return -ENOMEM;
2832 }
2833 }
2834 memset(pHba->lct, 0, pHba->lct_size);
2835
2836 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2837 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2838 msg[2] = 0;
2839 msg[3] = 0;
2840 msg[4] = 0xFFFFFFFF; /* All devices */
2841 msg[5] = 0x00000000; /* Report now */
2842 msg[6] = 0xD0000000|pHba->lct_size;
2843 msg[7] = virt_to_bus(pHba->lct);
2844
2845 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
 2846 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
2847 pHba->name, ret);
2848 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2849 return ret;
2850 }
2851
2852 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2853 pHba->lct_size = pHba->lct->table_size << 2;
2854 kfree(pHba->lct);
2855 pHba->lct = NULL;
2856 }
2857 } while (pHba->lct == NULL);
2858
2859 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2860
2861
2862 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2863 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2864 pHba->FwDebugBufferSize = buf[1];
2865 pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0];
2866 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2867 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2868 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
2869 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2870 pHba->FwDebugBuffer_P += buf[2];
2871 pHba->FwDebugFlags = 0;
2872 }
2873
2874 return 0;
2875}
2876
2877static int adpt_i2o_build_sys_table(void)
2878{
2879 adpt_hba* pHba = NULL;
2880 int count = 0;
2881
2882 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
2883 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
2884
Jesper Juhlc9475cb2005-11-07 01:01:26 -08002885 kfree(sys_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886
2887 sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2888 if(!sys_tbl) {
2889 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2890 return -ENOMEM;
2891 }
2892 memset(sys_tbl, 0, sys_tbl_len);
2893
2894 sys_tbl->num_entries = hba_count;
2895 sys_tbl->version = I2OVERSION;
2896 sys_tbl->change_ind = sys_tbl_ind++;
2897
2898 for(pHba = hba_chain; pHba; pHba = pHba->next) {
2899 // Get updated Status Block so we have the latest information
2900 if (adpt_i2o_status_get(pHba)) {
2901 sys_tbl->num_entries--;
2902 continue; // try next one
2903 }
2904
2905 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2906 sys_tbl->iops[count].iop_id = pHba->unit + 2;
2907 sys_tbl->iops[count].seg_num = 0;
2908 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2909 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2910 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2911 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2912 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2913 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002914 sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port);
2915 sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916
2917 count++;
2918 }
2919
2920#ifdef DEBUG
2921{
2922 u32 *table = (u32*)sys_tbl;
2923 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2924 for(count = 0; count < (sys_tbl_len >>2); count++) {
2925 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2926 count, table[count]);
2927 }
2928}
2929#endif
2930
2931 return 0;
2932}
2933
2934
2935/*
2936 * Dump the information block associated with a given unit (TID)
2937 */
2938
2939static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2940{
2941 char buf[64];
2942 int unit = d->lct_data.tid;
2943
2944 printk(KERN_INFO "TID %3.3d ", unit);
2945
2946 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
2947 {
2948 buf[16]=0;
2949 printk(" Vendor: %-12.12s", buf);
2950 }
2951 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
2952 {
2953 buf[16]=0;
2954 printk(" Device: %-12.12s", buf);
2955 }
2956 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
2957 {
2958 buf[8]=0;
2959 printk(" Rev: %-12.12s\n", buf);
2960 }
2961#ifdef DEBUG
2962 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
2963 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
2964 printk(KERN_INFO "\tFlags: ");
2965
2966 if(d->lct_data.device_flags&(1<<0))
2967 printk("C"); // ConfigDialog requested
2968 if(d->lct_data.device_flags&(1<<1))
2969 printk("U"); // Multi-user capable
2970 if(!(d->lct_data.device_flags&(1<<4)))
2971 printk("P"); // Peer service enabled!
2972 if(!(d->lct_data.device_flags&(1<<5)))
2973 printk("M"); // Mgmt service enabled!
2974 printk("\n");
2975#endif
2976}
2977
2978#ifdef DEBUG
2979/*
2980 * Do i2o class name lookup
2981 */
2982static const char *adpt_i2o_get_class_name(int class)
2983{
2984 int idx = 16;
2985 static char *i2o_class_name[] = {
2986 "Executive",
2987 "Device Driver Module",
2988 "Block Device",
2989 "Tape Device",
2990 "LAN Interface",
2991 "WAN Interface",
2992 "Fibre Channel Port",
2993 "Fibre Channel Device",
2994 "SCSI Device",
2995 "ATE Port",
2996 "ATE Device",
2997 "Floppy Controller",
2998 "Floppy Device",
2999 "Secondary Bus Port",
3000 "Peer Transport Agent",
3001 "Peer Transport",
3002 "Unknown"
3003 };
3004
3005 switch(class&0xFFF) {
3006 case I2O_CLASS_EXECUTIVE:
3007 idx = 0; break;
3008 case I2O_CLASS_DDM:
3009 idx = 1; break;
3010 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3011 idx = 2; break;
3012 case I2O_CLASS_SEQUENTIAL_STORAGE:
3013 idx = 3; break;
3014 case I2O_CLASS_LAN:
3015 idx = 4; break;
3016 case I2O_CLASS_WAN:
3017 idx = 5; break;
3018 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3019 idx = 6; break;
3020 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3021 idx = 7; break;
3022 case I2O_CLASS_SCSI_PERIPHERAL:
3023 idx = 8; break;
3024 case I2O_CLASS_ATE_PORT:
3025 idx = 9; break;
3026 case I2O_CLASS_ATE_PERIPHERAL:
3027 idx = 10; break;
3028 case I2O_CLASS_FLOPPY_CONTROLLER:
3029 idx = 11; break;
3030 case I2O_CLASS_FLOPPY_DEVICE:
3031 idx = 12; break;
3032 case I2O_CLASS_BUS_ADAPTER_PORT:
3033 idx = 13; break;
3034 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3035 idx = 14; break;
3036 case I2O_CLASS_PEER_TRANSPORT:
3037 idx = 15; break;
3038 }
3039 return i2o_class_name[idx];
3040}
3041#endif
3042
3043
3044static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3045{
3046 u32 msg[6];
3047 int ret, size = sizeof(i2o_hrt);
3048
3049 do {
3050 if (pHba->hrt == NULL) {
3051 pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3052 if (pHba->hrt == NULL) {
3053 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3054 return -ENOMEM;
3055 }
3056 }
3057
3058 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3059 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3060 msg[2]= 0;
3061 msg[3]= 0;
3062 msg[4]= (0xD0000000 | size); /* Simple transaction */
3063 msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */
3064
3065 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3066 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3067 return ret;
3068 }
3069
3070 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3071 size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3072 kfree(pHba->hrt);
3073 pHba->hrt = NULL;
3074 }
3075 } while(pHba->hrt == NULL);
3076 return 0;
3077}
3078
3079/*
3080 * Query one scalar group value or a whole scalar group.
3081 */
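/*
 * Typical use (see adpt_i2o_lct_get() above): pass field == -1 to fetch a
 * whole group, e.g.
 *	adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, sizeof(buf));
 * On success the group data (minus the 8-byte result header) is copied
 * into buf and buflen is returned; a negative value indicates an error.
 */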
3082static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3083 int group, int field, void *buf, int buflen)
3084{
3085 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3086 u8 *resblk;
3087
3088 int size;
3089
3090 /* 8 bytes for header */
3091 resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
3092 if (resblk == NULL) {
3093 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3094 return -ENOMEM;
3095 }
3096
3097 if (field == -1) /* whole group */
3098 opblk[4] = -1;
3099
3100 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3101 opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
3102 if (size == -ETIME) {
3103 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3104 return -ETIME;
3105 } else if (size == -EINTR) {
3106 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3107 return -EINTR;
3108 }
3109
3110 memcpy(buf, resblk+8, buflen); /* cut off header */
3111
3112 kfree(resblk);
3113 if (size < 0)
3114 return size;
3115
3116 return buflen;
3117}
3118
3119
3120/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3121 *
3122 * This function can be used for all UtilParamsGet/Set operations.
3123 * The OperationBlock is given in opblk-buffer,
3124 * and results are returned in resblk-buffer.
3125 * Note that the minimum sized resblk is 8 bytes and contains
3126 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3127 */
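/*
 * For a single-field read, the OperationBlock built by
 * adpt_i2o_query_scalar() above is roughly six u16s:
 *	{ operation count (1), reserved, I2O_PARAMS_FIELD_GET, group,
 *	  field count (1), field index }
 * and opblk[4] is set to -1 to request every field in the group.
 */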
3128static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3129 void *opblk, int oplen, void *resblk, int reslen)
3130{
3131 u32 msg[9];
3132 u32 *res = (u32 *)resblk;
3133 int wait_status;
3134
3135 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3136 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3137 msg[2] = 0;
3138 msg[3] = 0;
3139 msg[4] = 0;
3140 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3141 msg[6] = virt_to_bus(opblk);
3142 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3143 msg[8] = virt_to_bus(resblk);
3144
3145 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3146 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
3147 return wait_status; /* -DetailedStatus */
3148 }
3149
3150 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3151 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3152 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3153 pHba->name,
3154 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3155 : "PARAMS_GET",
3156 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3157 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3158 }
3159
3160 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3161}
3162
3163
3164static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3165{
3166 u32 msg[4];
3167 int ret;
3168
3169 adpt_i2o_status_get(pHba);
3170
3171 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3172
3173 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3174 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3175 return 0;
3176 }
3177
3178 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3179 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3180 msg[2] = 0;
3181 msg[3] = 0;
3182
3183 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3184 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3185 pHba->unit, -ret);
3186 } else {
3187 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3188 }
3189
3190 adpt_i2o_status_get(pHba);
3191 return ret;
3192}
3193
3194
3195/*
3196 * Enable IOP. Allows the IOP to resume external operations.
3197 */
3198static int adpt_i2o_enable_hba(adpt_hba* pHba)
3199{
3200 u32 msg[4];
3201 int ret;
3202
3203 adpt_i2o_status_get(pHba);
3204 if(!pHba->status_block){
3205 return -ENOMEM;
3206 }
3207 /* Enable only allowed on READY state */
3208 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3209 return 0;
3210
3211 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3212 return -EINVAL;
3213
3214 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3215 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3216 msg[2]= 0;
3217 msg[3]= 0;
3218
3219 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3220 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3221 pHba->name, ret);
3222 } else {
3223 PDEBUG("%s: Enabled.\n", pHba->name);
3224 }
3225
3226 adpt_i2o_status_get(pHba);
3227 return ret;
3228}
3229
3230
3231static int adpt_i2o_systab_send(adpt_hba* pHba)
3232{
3233 u32 msg[12];
3234 int ret;
3235
3236 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3237 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3238 msg[2] = 0;
3239 msg[3] = 0;
3240 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3241 msg[5] = 0; /* Segment 0 */
3242
3243 /*
3244 * Provide three SGL-elements:
3245 * System table (SysTab), Private memory space declaration and
3246 * Private i/o space declaration
3247 */
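	/*
	 * Note: the private memory and i/o space elements are sent here as
	 * zero-length descriptors; only the SysTab element carries data.
	 */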
3248 msg[6] = 0x54000000 | sys_tbl_len;
3249 msg[7] = virt_to_phys(sys_tbl);
3250 msg[8] = 0x54000000 | 0;
3251 msg[9] = 0;
3252 msg[10] = 0xD4000000 | 0;
3253 msg[11] = 0;
3254
3255 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3256 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3257 pHba->name, ret);
3258 }
3259#ifdef DEBUG
3260 else {
3261 PINFO("%s: SysTab set.\n", pHba->name);
3262 }
3263#endif
3264
3265 return ret;
3266 }
3267
3268
3269/*============================================================================
3270 *
3271 *============================================================================
3272 */
3273
3274
3275#ifdef UARTDELAY
3276
 3277static void adpt_delay(int millisec)
3278{
3279 int i;
3280 for (i = 0; i < millisec; i++) {
3281 udelay(1000); /* delay for one millisecond */
3282 }
3283}
3284
3285#endif
3286
Matthew Wilcox55d9fcf2007-07-30 15:19:18 -06003287static struct scsi_host_template adpt_template = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 .name = "dpt_i2o",
3289 .proc_name = "dpt_i2o",
3290 .proc_info = adpt_proc_info,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291 .info = adpt_info,
3292 .queuecommand = adpt_queue,
3293 .eh_abort_handler = adpt_abort,
3294 .eh_device_reset_handler = adpt_device_reset,
3295 .eh_bus_reset_handler = adpt_bus_reset,
3296 .eh_host_reset_handler = adpt_reset,
3297 .bios_param = adpt_bios_param,
3298 .slave_configure = adpt_slave_configure,
3299 .can_queue = MAX_TO_IOP_MESSAGES,
3300 .this_id = 7,
3301 .cmd_per_lun = 1,
3302 .use_clustering = ENABLE_CLUSTERING,
3303};
Matthew Wilcox55d9fcf2007-07-30 15:19:18 -06003304
3305static s32 adpt_scsi_register(adpt_hba* pHba)
3306{
3307 struct Scsi_Host *host;
3308
3309 host = scsi_host_alloc(&adpt_template, sizeof(adpt_hba*));
3310 if (host == NULL) {
3311 printk ("%s: scsi_host_alloc returned NULL\n",pHba->name);
3312 return -1;
3313 }
3314 host->hostdata[0] = (unsigned long)pHba;
3315 pHba->host = host;
3316
3317 host->irq = pHba->pDev->irq;
3318 /* no IO ports, so don't have to set host->io_port and
3319 * host->n_io_port
3320 */
3321 host->io_port = 0;
3322 host->n_io_port = 0;
3323 /* see comments in scsi_host.h */
3324 host->max_id = 16;
3325 host->max_lun = 256;
3326 host->max_channel = pHba->top_scsi_channel + 1;
3327 host->cmd_per_lun = 1;
3328 host->unique_id = (uint) pHba;
3329 host->sg_tablesize = pHba->sg_tablesize;
3330 host->can_queue = pHba->post_fifo_size;
3331
3332 if (scsi_add_host(host, &pHba->pDev->dev)) {
3333 scsi_host_put(host);
3334 return -1;
3335 }
3336
3337 return 0;
3338}
3339
3340static int __init adpt_init(void)
3341{
3342 int count;
3343
3344 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3345#ifdef REBOOT_NOTIFIER
3346 register_reboot_notifier(&adpt_reboot_notifier);
3347#endif
3348
3349 count = adpt_detect();
3350
3351 return count > 0 ? 0 : -ENODEV;
3352}
3353
3354static void __exit adpt_exit(void)
3355{
3356 while (hba_chain)
3357 adpt_release(hba_chain);
3358}
3359
3360module_init(adpt_init);
3361module_exit(adpt_exit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362MODULE_LICENSE("GPL");