blob: dc6b2d4a9aa117facf4d6210489134957ec007a6 [file] [log] [blame]
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                             for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37////////////////////////////////////////////////////////////////
38
39#include <linux/ioctl.h> /* For SCSI-Passthrough */
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h> /* for kmalloc() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <linux/pci.h> /* for PCI support */
45#include <linux/proc_fs.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h> /* for udelay */
48#include <linux/interrupt.h>
49#include <linux/kernel.h> /* for printk */
50#include <linux/sched.h>
51#include <linux/reboot.h>
52#include <linux/spinlock.h>
Matthias Gehre910638a2006-03-28 01:56:48 -080053#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55#include <linux/timer.h>
56#include <linux/string.h>
57#include <linux/ioport.h>
Arjan van de Ven0b950672006-01-11 13:16:10 +010058#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
60#include <asm/processor.h> /* for boot_cpu_data */
61#include <asm/pgtable.h>
62#include <asm/io.h> /* for virt_to_bus, etc. */
63
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68#include <scsi/scsi_tcq.h>
69
70#include "dpt/dptsig.h"
71#include "dpti.h"
72
73/*============================================================================
74 * Create a binary signature - this is read by dptsig
75 * Needed for our management apps
76 *============================================================================
77 */
/*
 * Binary driver signature read by the DPT management tools via dptsig.
 * Processor family/type fields are chosen per build architecture; an
 * unrecognized architecture reports (-1, -1).
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
95
96
97
98
99/*============================================================================
100 * Globals
101 *============================================================================
102 */
103
/* Serializes walks/updates of the hba_chain list (see adpt_proc_info). */
static DEFINE_MUTEX(adpt_configuration_lock);

/* I2O system table shared with the IOPs (DMA-coherent allocation). */
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;	/* bus address of sys_tbl */
static int sys_tbl_ind;
static int sys_tbl_len;

/* Singly linked chain of all discovered controllers. */
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

/* Entry points for the /dev management character device (register_chrdev). */
static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;				/* completion status from the reply */
	u32 id;					/* matches the context of the posted message */
	adpt_wait_queue_head_t *wq;		/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;	/* singly linked list link */
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;	/* id source for post_wait entries */
static DEFINE_SPINLOCK(adpt_post_wait_lock);
141
142
143/*============================================================================
144 * Functions
145 *============================================================================
146 */
147
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200148static inline int dpt_dma64(adpt_hba *pHba)
149{
150 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
151}
152
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200153static inline u32 dma_high(dma_addr_t addr)
154{
155 return upper_32_bits(addr);
156}
157
158static inline u32 dma_low(dma_addr_t addr)
159{
160 return (u32)addr;
161}
162
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163static u8 adpt_read_blink_led(adpt_hba* host)
164{
165 if(host->FwDebugBLEDflag_P != 0) {
166 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
167 return readb(host->FwDebugBLEDvalue_P);
168 }
169 }
170 return 0;
171}
172
173/*============================================================================
174 * Scsi host template interface functions
175 *============================================================================
176 */
177
/* PCI IDs claimed by this driver: the DPT and Raptor I2O boards. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
184
Andrew Morton24601bb2007-12-10 15:49:20 -0800185static int adpt_detect(struct scsi_host_template* sht)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186{
187 struct pci_dev *pDev = NULL;
188 adpt_hba* pHba;
189
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 PINFO("Detecting Adaptec I2O RAID controllers...\n");
191
192 /* search for all Adatpec I2O RAID cards */
Alan Coxa07f3532006-09-15 15:34:32 +0100193 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194 if(pDev->device == PCI_DPT_DEVICE_ID ||
195 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
Andrew Morton24601bb2007-12-10 15:49:20 -0800196 if(adpt_install_hba(sht, pDev) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 PERROR("Could not Init an I2O RAID device\n");
198 PERROR("Will not try to detect others.\n");
199 return hba_count-1;
200 }
Alan Coxa07f3532006-09-15 15:34:32 +0100201 pci_dev_get(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 }
203 }
204
205 /* In INIT state, Activate IOPs */
206 for (pHba = hba_chain; pHba; pHba = pHba->next) {
207 // Activate does get status , init outbound, and get hrt
208 if (adpt_i2o_activate_hba(pHba) < 0) {
209 adpt_i2o_delete_hba(pHba);
210 }
211 }
212
213
214 /* Active IOPs in HOLD state */
215
216rebuild_sys_tab:
217 if (hba_chain == NULL)
218 return 0;
219
220 /*
221 * If build_sys_table fails, we kill everything and bail
222 * as we can't init the IOPs w/o a system table
223 */
224 if (adpt_i2o_build_sys_table() < 0) {
225 adpt_i2o_sys_shutdown();
226 return 0;
227 }
228
229 PDEBUG("HBA's in HOLD state\n");
230
231 /* If IOP don't get online, we need to rebuild the System table */
232 for (pHba = hba_chain; pHba; pHba = pHba->next) {
233 if (adpt_i2o_online_hba(pHba) < 0) {
234 adpt_i2o_delete_hba(pHba);
235 goto rebuild_sys_tab;
236 }
237 }
238
239 /* Active IOPs now in OPERATIONAL state */
240 PDEBUG("HBA's in OPERATIONAL state\n");
241
242 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
243 for (pHba = hba_chain; pHba; pHba = pHba->next) {
244 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
245 if (adpt_i2o_lct_get(pHba) < 0){
246 adpt_i2o_delete_hba(pHba);
247 continue;
248 }
249
250 if (adpt_i2o_parse_lct(pHba) < 0){
251 adpt_i2o_delete_hba(pHba);
252 continue;
253 }
254 adpt_inquiry(pHba);
255 }
256
257 for (pHba = hba_chain; pHba; pHba = pHba->next) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +0200258 if (adpt_scsi_host_alloc(pHba, sht) < 0){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 adpt_i2o_delete_hba(pHba);
260 continue;
261 }
262 pHba->initialized = TRUE;
263 pHba->state &= ~DPTI_STATE_RESET;
264 }
265
266 // Register our control device node
267 // nodes will need to be created in /dev to access this
268 // the nodes can not be created from within the driver
269 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
Andrew Morton24601bb2007-12-10 15:49:20 -0800270 adpt_i2o_sys_shutdown();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 return 0;
272 }
273 return hba_count;
274}
275
276
/*
 * scsi_unregister will be called AFTER we return.
 *
 * Release one host: tear down the HBA and unregister its Scsi_Host.
 * NOTE(review): the header comment says scsi_unregister is called after
 * return, yet the function also calls it directly — confirm against the
 * mid-layer version this driver targets.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
288
289
/*
 * Issue a standard SCSI INQUIRY (36 bytes) to the IOP itself and build the
 * human-readable controller description in pHba->detail
 * ("Vendor: ... Model: ... FW: ...").  Uses a private I2O SCSI_EXEC
 * message with a single SGE (32- or 64-bit form depending on dpt_dma64).
 * Finishes by refreshing the IOP status block.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];	/* large enough for the 17-word 64-bit-SGE variant */
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* 80-byte coherent buffer; only the first 36 bytes receive INQUIRY data */
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;	/* last/end-of-list SGE */
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* on timeout/interrupt the buffer is deliberately NOT freed —
		 * presumably because the IOP may still DMA into it; confirm */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* assemble "Vendor: Adaptec  Model: <buf[16..31]> FW: <buf[32..35]>" */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
383
384
385static int adpt_slave_configure(struct scsi_device * device)
386{
387 struct Scsi_Host *host = device->host;
388 adpt_hba* pHba;
389
390 pHba = (adpt_hba *) host->hostdata[0];
391
392 if (host->can_queue && device->tagged_supported) {
393 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
394 host->can_queue - 1);
395 } else {
396 scsi_adjust_queue_depth(device, 0, 1);
397 }
398 return 0;
399}
400
/*
 * queuecommand entry point.  Short-circuits REQUEST_SENSE when sense data
 * is already present, refuses to queue while an ioctl or reset is in
 * flight, binds the scsi_device to its adpt_device on first use, and
 * otherwise hands the command to adpt_scsi_to_i2o() for posting.
 */
static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need to a way to restart the scsi-cores queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		/* busy: ask the mid-layer to retry later */
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
468
/*
 * BIOS geometry heuristic: pick heads/sectors from capacity bands, derive
 * cylinders, with a fixed override for CD-ROMs (type 5).
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than ox2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x4000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* NOTE(review): sector_div() divides 'capacity' in place and RETURNS
	 * THE REMAINDER, so 'cylinders' receives the remainder rather than
	 * the quotient — looks suspicious; confirm intended behavior. */
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
519
520
521static const char *adpt_info(struct Scsi_Host *host)
522{
523 adpt_hba* pHba;
524
525 pHba = (adpt_hba *) host->hostdata[0];
526 return (char *) (pHba->detail);
527}
528
/*
 * /proc/scsi read handler (old proc_scsi interface).  Writes (inout==TRUE)
 * are rejected.  Emits a driver banner plus one entry per known device,
 * honouring the offset/length window protocol: 'begin' tracks the file
 * offset where 'buffer' logically starts; each CHECKPOINT either stops
 * (past the requested window) or resets len/begin (still before it).
 */
static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len +=  sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){	/* walk the per-target lun chain */
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
653
/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 *
 * Uses the command's serial_number; the reverse mapping is done by
 * adpt_cmd_from_context(), which searches by the same value.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
661
/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 *
 * Caller holds host_lock (it is dropped here and re-taken before return).
 * Scans every device's command list for a matching serial number;
 * returns NULL when the context is 0 or no command matches.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	/* host_lock must be released while shost_for_each_device runs */
	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);	/* drop iterator's device ref */
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
693
/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 *
 * On 32-bit kernels the pointer itself fits in a u32.  On 64-bit kernels
 * a slot index into pHba->ioctl_reply_context is handed out instead; if
 * the table is full the reply buffer is freed and (u32)-1 is returned.
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	/* host_lock guards the slot table */
	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		/* no free slot: drop the reply rather than leak it */
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
724
/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 *
 * On 64-bit kernels this also releases the slot (one-shot lookup).
 * NOTE(review): the 64-bit path reads/clears the table without the
 * host_lock that adpt_ioctl_to_context() takes — confirm callers
 * serialize access.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
739
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740/*===========================================================================
741 * Error Handling routines
742 *===========================================================================
743 */
744
/*
 * eh abort handler: post an I2O SCSI_ABORT for the given command's
 * context and wait (FOREVER) for the IOP to answer.  Returns SUCCESS
 * only when the abort message completes with status 0.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	/* command never issued (no serial number yet) — nothing to abort */
	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);	/* identifies the victim command */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}
784
785
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/*
 * eh device-reset handler: post an I2O DEVICE_RESET to the target's TID.
 * DPTI_DEV_RESET is set on the adpt_device while the message is in
 * flight so adpt_queue() defers new commands; the previous state is
 * restored afterwards.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	/* NOTE(review): rcode is u32 but compared against negative errno
	 * values below — works via the usual conversions, but int would
	 * be clearer; confirm before changing. */
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
830
831
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/*
 * eh bus-reset handler: post an I2O HBA_BUS_RESET to the bus-adapter TID
 * of the command's channel and wait (FOREVER) for completion.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
860
861// This version of reset is called by the eh_error_handler
Jeff Garzik df0ae242005-05-28 07:57:14 -0400862static int __adpt_reset(struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863{
864 adpt_hba* pHba;
865 int rcode;
866 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
867 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
868 rcode = adpt_hba_reset(pHba);
869 if(rcode == 0){
870 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
871 return SUCCESS;
872 } else {
873 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
874 return FAILED;
875 }
876}
877
Jeff Garzik df0ae242005-05-28 07:57:14 -0400878static int adpt_reset(struct scsi_cmnd* cmd)
879{
880 int rc;
881
882 spin_lock_irq(cmd->device->host->host_lock);
883 rc = __adpt_reset(cmd);
884 spin_unlock_irq(cmd->device->host->host_lock);
885
886 return rc;
887}
888
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/*
 * Full controller re-initialization: re-activate the IOP, rebuild the
 * system table, bring the IOP online, re-read and re-parse the LCT, then
 * fail back the SCBs that were outstanding across the reset.  If any
 * step fails the HBA is deleted and the step's error code is returned.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	/* block new commands while the reset runs (checked in adpt_queue) */
	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
929
930/*===========================================================================
931 *
932 *===========================================================================
933 */
934
935
/*
 * Tear everything down at shutdown/unload: delete every HBA on the chain
 * (using the next-before-delete pattern since delete frees the node),
 * then free any leftover post_wait bookkeeping entries.
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	/* NOTE(review): walked without adpt_post_wait_lock — presumably
	 * safe because every HBA is already gone; confirm. */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
967
Andrew Morton24601bb2007-12-10 15:49:20 -0800968static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969{
970
971 adpt_hba* pHba = NULL;
972 adpt_hba* p = NULL;
973 ulong base_addr0_phys = 0;
974 ulong base_addr1_phys = 0;
975 u32 hba_map0_area_size = 0;
976 u32 hba_map1_area_size = 0;
977 void __iomem *base_addr_virt = NULL;
978 void __iomem *msg_addr_virt = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200979 int dma64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
981 int raptorFlag = FALSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982
983 if(pci_enable_device(pDev)) {
984 return -EINVAL;
985 }
Salyzyn, Mark9638d892006-01-12 08:31:57 -0500986
987 if (pci_request_regions(pDev, "dpt_i2o")) {
988 PERROR("dpti: adpt_config_hba: pci request region failed\n");
989 return -EINVAL;
990 }
991
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 pci_set_master(pDev);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200993
994 /*
995 * See if we should enable dma64 mode.
996 */
997 if (sizeof(dma_addr_t) > 4 &&
998 pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
999 if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
1000 dma64 = 1;
1001 }
1002 if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 return -EINVAL;
1004
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001005 /* adapter only supports message blocks below 4GB */
1006 pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
1007
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 base_addr0_phys = pci_resource_start(pDev,0);
1009 hba_map0_area_size = pci_resource_len(pDev,0);
1010
1011 // Check if standard PCI card or single BAR Raptor
1012 if(pDev->device == PCI_DPT_DEVICE_ID){
1013 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
1014 // Raptor card with this device id needs 4M
1015 hba_map0_area_size = 0x400000;
1016 } else { // Not Raptor - it is a PCI card
1017 if(hba_map0_area_size > 0x100000 ){
1018 hba_map0_area_size = 0x100000;
1019 }
1020 }
1021 } else {// Raptor split BAR config
1022 // Use BAR1 in this configuration
1023 base_addr1_phys = pci_resource_start(pDev,1);
1024 hba_map1_area_size = pci_resource_len(pDev,1);
1025 raptorFlag = TRUE;
1026 }
1027
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001028#if BITS_PER_LONG == 64
1029 /*
1030 * The original Adaptec 64 bit driver has this comment here:
1031 * "x86_64 machines need more optimal mappings"
1032 *
1033 * I assume some HBAs report ridiculously large mappings
1034 * and we need to limit them on platforms with IOMMUs.
1035 */
1036 if (raptorFlag == TRUE) {
1037 if (hba_map0_area_size > 128)
1038 hba_map0_area_size = 128;
1039 if (hba_map1_area_size > 524288)
1040 hba_map1_area_size = 524288;
1041 } else {
1042 if (hba_map0_area_size > 524288)
1043 hba_map0_area_size = 524288;
1044 }
1045#endif
1046
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
1048 if (!base_addr_virt) {
James Bottomley9c472dd2005-08-08 11:51:38 -05001049 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 PERROR("dpti: adpt_config_hba: io remap failed\n");
1051 return -EINVAL;
1052 }
1053
1054 if(raptorFlag == TRUE) {
1055 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
1056 if (!msg_addr_virt) {
1057 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
1058 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001059 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 return -EINVAL;
1061 }
1062 } else {
1063 msg_addr_virt = base_addr_virt;
1064 }
1065
1066 // Allocate and zero the data structure
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02001067 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1068 if (!pHba) {
1069 if (msg_addr_virt != base_addr_virt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 iounmap(msg_addr_virt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001072 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073 return -ENOMEM;
1074 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
Arjan van de Ven0b950672006-01-11 13:16:10 +01001076 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
1078 if(hba_chain != NULL){
1079 for(p = hba_chain; p->next; p = p->next);
1080 p->next = pHba;
1081 } else {
1082 hba_chain = pHba;
1083 }
1084 pHba->next = NULL;
1085 pHba->unit = hba_count;
Benoit Boissinot 23a2bc22005-04-25 19:46:30 -07001086 sprintf(pHba->name, "dpti%d", hba_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 hba_count++;
1088
Arjan van de Ven0b950672006-01-11 13:16:10 +01001089 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090
1091 pHba->pDev = pDev;
1092 pHba->base_addr_phys = base_addr0_phys;
1093
1094 // Set up the Virtual Base Address of the I2O Device
1095 pHba->base_addr_virt = base_addr_virt;
1096 pHba->msg_addr_virt = msg_addr_virt;
1097 pHba->irq_mask = base_addr_virt+0x30;
1098 pHba->post_port = base_addr_virt+0x40;
1099 pHba->reply_port = base_addr_virt+0x44;
1100
1101 pHba->hrt = NULL;
1102 pHba->lct = NULL;
1103 pHba->lct_size = 0;
1104 pHba->status_block = NULL;
1105 pHba->post_count = 0;
1106 pHba->state = DPTI_STATE_RESET;
1107 pHba->pDev = pDev;
1108 pHba->devices = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001109 pHba->dma64 = dma64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
1111 // Initializing the spinlocks
1112 spin_lock_init(&pHba->state_lock);
1113 spin_lock_init(&adpt_post_wait_lock);
1114
1115 if(raptorFlag == 0){
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001116 printk(KERN_INFO "Adaptec I2O RAID controller"
1117 " %d at %p size=%x irq=%d%s\n",
1118 hba_count-1, base_addr_virt,
1119 hba_map0_area_size, pDev->irq,
1120 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 } else {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001122 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1123 hba_count-1, pDev->irq,
1124 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1126 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1127 }
1128
Thomas Gleixner1d6f3592006-07-01 19:29:42 -07001129 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1131 adpt_i2o_delete_hba(pHba);
1132 return -EINVAL;
1133 }
1134
1135 return 0;
1136}
1137
1138
/*
 * Detach and destroy one adapter: unhook its IRQ, unlink it from the
 * global hba chain, unmap its BARs, free all DMA-coherent buffers and
 * the per-channel device tables, then free the hba itself.  When the
 * last adapter goes away the character device major is unregistered.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiese
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Unlink pHba from the singly linked hba_chain (p2 trails p1). */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	/* Release MMIO mappings; BAR1 only if it was mapped separately. */
	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	/* Free the DMA-coherent firmware structures (HRT, LCT, status
	 * block, reply FIFO pool) that were allocated at init time. */
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	/* Free the i2o_device chain and every per-lun adpt_device. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
	}
}
1219
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1221{
1222 struct adpt_device* d;
1223
1224 if(chan < 0 || chan >= MAX_CHANNEL)
1225 return NULL;
1226
1227 if( pHba->channel[chan].device == NULL){
1228 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1229 return NULL;
1230 }
1231
1232 d = pHba->channel[chan].device[id];
1233 if(!d || d->tid == 0) {
1234 return NULL;
1235 }
1236
1237 /* If it is the only lun at that address then this should match*/
1238 if(d->scsi_lun == lun){
1239 return d;
1240 }
1241
1242 /* else we need to look through all the luns */
1243 for(d=d->next_lun ; d ; d = d->next_lun){
1244 if(d->scsi_lun == lun){
1245 return d;
1246 }
1247 }
1248 return NULL;
1249}
1250
1251
/*
 * Post an I2O message frame and sleep until the matching reply arrives
 * (see adpt_i2o_post_wait_complete()) or the timeout expires.
 *
 * A wait entry carrying a 15-bit id is linked onto adpt_post_wait_queue;
 * the id is stuffed into msg[2] so the reply handler can find us.
 * timeout is in seconds (0 = wait forever).  Returns the status stored
 * by the completer, -ETIME/-ETIMEDOUT on expiry, -EOPNOTSUPP when the
 * IOP rejected the function, or -ENOMEM.
 *
 * On -ETIMEDOUT the wait entry is deliberately NOT freed here: the IOP
 * may still write to it later, so it is left for shutdown to reclaim.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	/* Register on the wait queue before posting so a fast reply
	 * cannot race past us. */
	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* Top bit flags "post-wait context"; low 15 bits carry our id. */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the host lock while sleeping so the ISR / other
		 * commands can make progress. */
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resorces is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.   */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
1337
1338
/*
 * Post one message frame to the IOP: poll the post port (up to 30s)
 * for a free frame offset, copy the message into the frame via MMIO,
 * then write the offset back to the post port to submit it.
 *
 * Returns 0 on success or -ETIMEDOUT when no frame becomes available.
 * May sleep, so callers must not hold a spinlock.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		/* Reading the post port pops a free message-frame offset;
		 * EMPTY_QUEUE means none is available yet. */
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	/* Make sure the frame contents are visible before posting it. */
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1368
1369
1370static void adpt_i2o_post_wait_complete(u32 context, int status)
1371{
1372 struct adpt_i2o_post_wait_data *p1 = NULL;
1373 /*
1374 * We need to search through the adpt_post_wait
1375 * queue to see if the given message is still
1376 * outstanding. If not, it means that the IOP
1377 * took longer to respond to the message than we
1378 * had allowed and timer has already expired.
1379 * Not much we can do about that except log
1380 * it for debug purposes, increase timeout, and recompile
1381 *
1382 * Lock needed to keep anyone from moving queue pointers
1383 * around while we're looking through them.
1384 */
1385
1386 context &= 0x7fff;
1387
1388 spin_lock(&adpt_post_wait_lock);
1389 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1390 if(p1->id == context) {
1391 p1->status = status;
1392 spin_unlock(&adpt_post_wait_lock);
1393 wake_up_interruptible(p1->wq);
1394 return;
1395 }
1396 }
1397 spin_unlock(&adpt_post_wait_lock);
1398 // If this happens we lose commands that probably really completed
1399 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1400 printk(KERN_DEBUG" Tasks in wait queue:\n");
1401 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1402 printk(KERN_DEBUG" %d\n",p1->id);
1403 }
1404 return;
1405}
1406
/*
 * Issue an I2O ADAPTER_RESET to the IOP and wait for it to complete.
 *
 * The IOP writes a one-byte result into a small DMA-coherent status
 * buffer: 0x01 = reset in progress, 0x02 = reset rejected.  On the
 * timeout paths the buffer is intentionally leaked because the
 * controller may still DMA into it later.
 *
 * Returns 0 on success, -ETIMEDOUT or -ENOMEM on failure.  Sleeps, so
 * must be called from process context without spinlocks held.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	/* Grab a free message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	/* Build the ADAPTER_RESET request; msg[6..7] carry the bus
	 * address the IOP should write its result byte to. */
	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* Poll until the IOP writes a nonzero result byte. */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to 
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
1509
1510
1511static int adpt_i2o_parse_lct(adpt_hba* pHba)
1512{
1513 int i;
1514 int max;
1515 int tid;
1516 struct i2o_device *d;
1517 i2o_lct *lct = pHba->lct;
1518 u8 bus_no = 0;
1519 s16 scsi_id;
1520 s16 scsi_lun;
1521 u32 buf[10]; // larger than 7, or 8 ...
1522 struct adpt_device* pDev;
1523
1524 if (lct == NULL) {
1525 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1526 return -1;
1527 }
1528
1529 max = lct->table_size;
1530 max -= 3;
1531 max /= 9;
1532
1533 for(i=0;i<max;i++) {
1534 if( lct->lct_entry[i].user_tid != 0xfff){
1535 /*
1536 * If we have hidden devices, we need to inform the upper layers about
1537 * the possible maximum id reference to handle device access when
1538 * an array is disassembled. This code has no other purpose but to
1539 * allow us future access to devices that are currently hidden
1540 * behind arrays, hotspares or have not been configured (JBOD mode).
1541 */
1542 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1543 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1544 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1545 continue;
1546 }
1547 tid = lct->lct_entry[i].tid;
1548 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1549 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1550 continue;
1551 }
1552 bus_no = buf[0]>>16;
1553 scsi_id = buf[1];
1554 scsi_lun = (buf[2]>>8 )&0xff;
1555 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1556 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1557 continue;
1558 }
1559 if (scsi_id >= MAX_ID){
1560 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1561 continue;
1562 }
1563 if(bus_no > pHba->top_scsi_channel){
1564 pHba->top_scsi_channel = bus_no;
1565 }
1566 if(scsi_id > pHba->top_scsi_id){
1567 pHba->top_scsi_id = scsi_id;
1568 }
1569 if(scsi_lun > pHba->top_scsi_lun){
1570 pHba->top_scsi_lun = scsi_lun;
1571 }
1572 continue;
1573 }
Robert P. J. Day5cbded52006-12-13 00:35:56 -08001574 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if(d==NULL)
1576 {
1577 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1578 return -ENOMEM;
1579 }
1580
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001581 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 d->next = NULL;
1583
1584 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1585
1586 d->flags = 0;
1587 tid = d->lct_data.tid;
1588 adpt_i2o_report_hba_unit(pHba, d);
1589 adpt_i2o_install_device(pHba, d);
1590 }
1591 bus_no = 0;
1592 for(d = pHba->devices; d ; d = d->next) {
1593 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1594 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1595 tid = d->lct_data.tid;
1596 // TODO get the bus_no from hrt-but for now they are in order
1597 //bus_no =
1598 if(bus_no > pHba->top_scsi_channel){
1599 pHba->top_scsi_channel = bus_no;
1600 }
1601 pHba->channel[bus_no].type = d->lct_data.class_id;
1602 pHba->channel[bus_no].tid = tid;
1603 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1604 {
1605 pHba->channel[bus_no].scsi_id = buf[1];
1606 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1607 }
1608 // TODO remove - this is just until we get from hrt
1609 bus_no++;
1610 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1611 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1612 break;
1613 }
1614 }
1615 }
1616
1617 // Setup adpt_device table
1618 for(d = pHba->devices; d ; d = d->next) {
1619 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1620 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1621 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1622
1623 tid = d->lct_data.tid;
1624 scsi_id = -1;
1625 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1626 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1627 bus_no = buf[0]>>16;
1628 scsi_id = buf[1];
1629 scsi_lun = (buf[2]>>8 )&0xff;
1630 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1631 continue;
1632 }
1633 if (scsi_id >= MAX_ID) {
1634 continue;
1635 }
1636 if( pHba->channel[bus_no].device[scsi_id] == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301637 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 if(pDev == NULL) {
1639 return -ENOMEM;
1640 }
1641 pHba->channel[bus_no].device[scsi_id] = pDev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 } else {
1643 for( pDev = pHba->channel[bus_no].device[scsi_id];
1644 pDev->next_lun; pDev = pDev->next_lun){
1645 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301646 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 if(pDev->next_lun == NULL) {
1648 return -ENOMEM;
1649 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 pDev = pDev->next_lun;
1651 }
1652 pDev->tid = tid;
1653 pDev->scsi_channel = bus_no;
1654 pDev->scsi_id = scsi_id;
1655 pDev->scsi_lun = scsi_lun;
1656 pDev->pI2o_dev = d;
1657 d->owner = pDev;
1658 pDev->type = (buf[0])&0xff;
1659 pDev->flags = (buf[0]>>8)&0xff;
1660 if(scsi_id > pHba->top_scsi_id){
1661 pHba->top_scsi_id = scsi_id;
1662 }
1663 if(scsi_lun > pHba->top_scsi_lun){
1664 pHba->top_scsi_lun = scsi_lun;
1665 }
1666 }
1667 if(scsi_id == -1){
1668 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1669 d->lct_data.identity_tag);
1670 }
1671 }
1672 }
1673 return 0;
1674}
1675
1676
1677/*
1678 * Each I2O controller has a chain of devices on it - these match
1679 * the useful parts of the LCT of the board.
1680 */
1681
1682static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1683{
Arjan van de Ven0b950672006-01-11 13:16:10 +01001684 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 d->controller=pHba;
1686 d->owner=NULL;
1687 d->next=pHba->devices;
1688 d->prev=NULL;
1689 if (pHba->devices != NULL){
1690 pHba->devices->prev=d;
1691 }
1692 pHba->devices=d;
1693 *d->dev_name = 0;
1694
Arjan van de Ven0b950672006-01-11 13:16:10 +01001695 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 return 0;
1697}
1698
1699static int adpt_open(struct inode *inode, struct file *file)
1700{
1701 int minor;
1702 adpt_hba* pHba;
1703
1704 //TODO check for root access
1705 //
1706 minor = iminor(inode);
1707 if (minor >= hba_count) {
1708 return -ENXIO;
1709 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001710 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1712 if (pHba->unit == minor) {
1713 break; /* found adapter */
1714 }
1715 }
1716 if (pHba == NULL) {
Arjan van de Ven0b950672006-01-11 13:16:10 +01001717 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 return -ENXIO;
1719 }
1720
1721// if(pHba->in_use){
Arjan van de Ven0b950672006-01-11 13:16:10 +01001722 // mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723// return -EBUSY;
1724// }
1725
1726 pHba->in_use = 1;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001727 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
1729 return 0;
1730}
1731
1732static int adpt_close(struct inode *inode, struct file *file)
1733{
1734 int minor;
1735 adpt_hba* pHba;
1736
1737 minor = iminor(inode);
1738 if (minor >= hba_count) {
1739 return -ENXIO;
1740 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001741 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1743 if (pHba->unit == minor) {
1744 break; /* found adapter */
1745 }
1746 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001747 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 if (pHba == NULL) {
1749 return -ENXIO;
1750 }
1751
1752 pHba->in_use = 0;
1753
1754 return 0;
1755}
1756
1757
1758static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1759{
1760 u32 msg[MAX_MESSAGE_SIZE];
1761 u32* reply = NULL;
1762 u32 size = 0;
1763 u32 reply_size = 0;
1764 u32 __user *user_msg = arg;
1765 u32 __user * user_reply = NULL;
1766 void *sg_list[pHba->sg_tablesize];
1767 u32 sg_offset = 0;
1768 u32 sg_count = 0;
1769 int sg_index = 0;
1770 u32 i = 0;
1771 u32 rcode = 0;
1772 void *p = NULL;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001773 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 ulong flags = 0;
1775
1776 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1777 // get user msg size in u32s
1778 if(get_user(size, &user_msg[0])){
1779 return -EFAULT;
1780 }
1781 size = size>>16;
1782
1783 user_reply = &user_msg[size];
1784 if(size > MAX_MESSAGE_SIZE){
1785 return -EFAULT;
1786 }
1787 size *= 4; // Convert to bytes
1788
1789 /* Copy in the user's I2O command */
1790 if(copy_from_user(msg, user_msg, size)) {
1791 return -EFAULT;
1792 }
1793 get_user(reply_size, &user_reply[0]);
1794 reply_size = reply_size>>16;
1795 if(reply_size > REPLY_FRAME_SIZE){
1796 reply_size = REPLY_FRAME_SIZE;
1797 }
1798 reply_size *= 4;
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301799 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 if(reply == NULL) {
1801 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1802 return -ENOMEM;
1803 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 sg_offset = (msg[0]>>4)&0xf;
1805 msg[2] = 0x40000000; // IOCTL context
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001806 msg[3] = adpt_ioctl_to_context(pHba, reply);
1807 if (msg[3] == (u32)-1)
1808 return -EBUSY;
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1811 if(sg_offset) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001812 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1814 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1815 if (sg_count > pHba->sg_tablesize){
1816 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1817 kfree (reply);
1818 return -EINVAL;
1819 }
1820
1821 for(i = 0; i < sg_count; i++) {
1822 int sg_size;
1823
1824 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1825 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1826 rcode = -EINVAL;
1827 goto cleanup;
1828 }
1829 sg_size = sg[i].flag_count & 0xffffff;
1830 /* Allocate memory for the transfer */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001831 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 if(!p) {
1833 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1834 pHba->name,sg_size,i,sg_count);
1835 rcode = -ENOMEM;
1836 goto cleanup;
1837 }
1838 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1839 /* Copy in the user's SG buffer if necessary */
1840 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001841 // sg_simple_element API is 32 bit
1842 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1844 rcode = -EFAULT;
1845 goto cleanup;
1846 }
1847 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001848 /* sg_simple_element API is 32 bit, but addr < 4GB */
1849 sg[i].addr_bus = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 }
1851 }
1852
1853 do {
1854 if(pHba->host)
1855 spin_lock_irqsave(pHba->host->host_lock, flags);
1856 // This state stops any new commands from enterring the
1857 // controller while processing the ioctl
1858// pHba->state |= DPTI_STATE_IOCTL;
1859// We can't set this now - The scsi subsystem sets host_blocked and
1860// the queue empties and stops. We need a way to restart the queue
1861 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1862 if (rcode != 0)
1863 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1864 rcode, reply);
1865// pHba->state &= ~DPTI_STATE_IOCTL;
1866 if(pHba->host)
1867 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1868 } while(rcode == -ETIMEDOUT);
1869
1870 if(rcode){
1871 goto cleanup;
1872 }
1873
1874 if(sg_offset) {
1875 /* Copy back the Scatter Gather buffers back to user space */
1876 u32 j;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001877 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 struct sg_simple_element* sg;
1879 int sg_size;
1880
1881 // re-acquire the original message to handle correctly the sg copy operation
1882 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1883 // get user msg size in u32s
1884 if(get_user(size, &user_msg[0])){
1885 rcode = -EFAULT;
1886 goto cleanup;
1887 }
1888 size = size>>16;
1889 size *= 4;
1890 /* Copy in the user's I2O command */
1891 if (copy_from_user (msg, user_msg, size)) {
1892 rcode = -EFAULT;
1893 goto cleanup;
1894 }
1895 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1896
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001897 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 sg = (struct sg_simple_element*)(msg + sg_offset);
1899 for (j = 0; j < sg_count; j++) {
1900 /* Copy out the SG list to user's buffer if necessary */
1901 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1902 sg_size = sg[j].flag_count & 0xffffff;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001903 // sg_simple_element API is 32 bit
1904 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1906 rcode = -EFAULT;
1907 goto cleanup;
1908 }
1909 }
1910 }
1911 }
1912
1913 /* Copy back the reply to user space */
1914 if (reply_size) {
1915 // we wrote our own values for context - now restore the user supplied ones
1916 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1917 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1918 rcode = -EFAULT;
1919 }
1920 if(copy_to_user(user_reply, reply, reply_size)) {
1921 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1922 rcode = -EFAULT;
1923 }
1924 }
1925
1926
1927cleanup:
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001928 if (rcode != -ETIME && rcode != -EINTR) {
1929 struct sg_simple_element *sg =
1930 (struct sg_simple_element*) (msg +sg_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 kfree (reply);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001932 while(sg_index) {
1933 if(sg_list[--sg_index]) {
1934 dma_free_coherent(&pHba->pDev->dev,
1935 sg[sg_index].flag_count & 0xffffff,
1936 sg_list[sg_index],
1937 sg[sg_index].addr_bus);
1938 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 }
1940 }
1941 return rcode;
1942}
1943
1944
1945/*
1946 * This routine returns information about the system. This does not effect
1947 * any logic and if the info is wrong - it doesn't matter.
1948 */
1949
1950/* Get all the info we can not get from kernel services */
1951static int adpt_system_info(void __user *buffer)
1952{
1953 sysInfo_S si;
1954
1955 memset(&si, 0, sizeof(si));
1956
1957 si.osType = OS_LINUX;
Adrian Bunka4cd16e2005-06-25 14:59:01 -07001958 si.osMajorVersion = 0;
1959 si.osMinorVersion = 0;
1960 si.osRevision = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 si.busType = SI_PCI_BUS;
1962 si.processorFamily = DPTI_sig.dsProcessorFamily;
1963
1964#if defined __i386__
1965 adpt_i386_info(&si);
1966#elif defined (__ia64__)
1967 adpt_ia64_info(&si);
1968#elif defined(__sparc__)
1969 adpt_sparc_info(&si);
1970#elif defined (__alpha__)
1971 adpt_alpha_info(&si);
1972#else
1973 si.processorType = 0xff ;
1974#endif
1975 if(copy_to_user(buffer, &si, sizeof(si))){
1976 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1977 return -EFAULT;
1978 }
1979
1980 return 0;
1981}
1982
#if defined __ia64__
/* Report the host CPU type (IA-64) for adpt_system_info(). */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
1992
1993
#if defined __sparc__
/* Report the host CPU type (UltraSPARC) for adpt_system_info(). */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
2003
#if defined __alpha__
/* Report the host CPU type (Alpha) for adpt_system_info(). */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
2013
#if defined __i386__

/*
 * Report the host CPU type (x86 family) for adpt_system_info().
 * CPU_586 and any newer/unrecognised family are both reported as a
 * Pentium, so an if/else chain over 386/486 covers every case the
 * original switch did.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	if (boot_cpu_data.x86 == CPU_386)
		si->processorType = PROC_386;
	else if (boot_cpu_data.x86 == CPU_486)
		si->processorType = PROC_486;
	else	/* CPU_586 and everything else */
		si->processorType = PROC_PENTIUM;
}

#endif
2038
2039
/*
 * ioctl entry point for the dpti control device.
 *
 * Looks up the adapter by char-device minor number, waits for any
 * in-progress controller reset to finish, then dispatches on the
 * command code.  Returns 0 on success or a negative errno.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	/* The minor number selects which adapter this ioctl targets. */
	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	/* Walk the global adapter list under the configuration mutex. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/*
	 * Sleep in 2-jiffy steps until any reset in progress completes.
	 * The volatile cast forces pHba->state to be re-read each pass.
	 */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		/* Hand the static driver signature block to the caller. */
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		/* Raw I2O message pass-through from the management tools. */
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		/* Describe the controller (PCI location, IRQ, capabilities). */
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA 0x0002
#define FLG_OSD_I2O 0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		/* Current blink-LED (fault) code, 0 if none. */
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		/* Full controller reset, serialised against the host lock. */
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2123
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  Recognised dpti commands are
 * forwarded unchanged to adpt_ioctl() under the big kernel lock;
 * anything else is rejected with -ENOIOCTLCMD.
 */
static long compat_adpt_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file->f_dentry->d_inode;
	long rc = -ENOIOCTLCMD;

	lock_kernel();
	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		rc = adpt_ioctl(inode, file, cmd, arg);
		break;
	}
	unlock_kernel();

	return rc;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
/*
 * Interrupt handler.  While the controller signals a pending interrupt,
 * pop reply frames off the outbound FIFO and complete whatever each one
 * refers to: an ioctl pass-through buffer, a post-wait message, or a
 * SCSI command.  Runs under host_lock when a SCSI host is registered.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		/* m is the bus address of the next reply frame. */
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/*
		 * Map the bus address back to a CPU pointer; normally it
		 * falls inside our coherent reply-frame pool.
		 */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			/*
			 * Controller could not process the message: recover
			 * the preserved MFA, copy its transaction context
			 * into the reply, and release the frame with a NOP.
			 */
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		/* Bit 30 = ioctl pass-through, bit 31 = post-wait. */
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/* A SCSI command in a post-wait context is unexpected. */
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Return the frame to the controller's free list. */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2255
/*
 * Build an I2O_CMD_SCSI_EXEC private message for @cmd addressed to
 * device @d and post it to the controller.  Returns 0 on success (also
 * when an unsupported data direction causes the command to be completed
 * immediately with INITIATOR_ERROR), otherwise the failure status of
 * adpt_i2o_post_this().
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		/* Extra SGL header words enabling 64-bit addressing. */
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			/* lptr remembers this element so the last one can be
			 * rewritten with the end-of-list flag below. */
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2376
2377
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002378static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
Andrew Morton24601bb2007-12-10 15:49:20 -08002379{
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002380 struct Scsi_Host *host;
Andrew Morton24601bb2007-12-10 15:49:20 -08002381
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002382 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
Andrew Morton24601bb2007-12-10 15:49:20 -08002383 if (host == NULL) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002384 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
Andrew Morton24601bb2007-12-10 15:49:20 -08002385 return -1;
2386 }
2387 host->hostdata[0] = (unsigned long)pHba;
2388 pHba->host = host;
2389
2390 host->irq = pHba->pDev->irq;
2391 /* no IO ports, so don't have to set host->io_port and
2392 * host->n_io_port
2393 */
2394 host->io_port = 0;
2395 host->n_io_port = 0;
2396 /* see comments in scsi_host.h */
2397 host->max_id = 16;
2398 host->max_lun = 256;
2399 host->max_channel = pHba->top_scsi_channel + 1;
2400 host->cmd_per_lun = 1;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002401 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
Andrew Morton24601bb2007-12-10 15:49:20 -08002402 host->sg_tablesize = pHba->sg_tablesize;
2403 host->can_queue = pHba->post_fifo_size;
2404
2405 return 0;
2406}
2407
2408
/*
 * Translate an I2O reply frame into midlayer status for @cmd and
 * complete it.  @reply is an ioremapped frame, hence the readl()/
 * memcpy_fromio() accessors throughout.  Returns the final cmd->result.
 */
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	// NOTE(review): reply+5 is an odd, unaligned byte offset for a
	// 32-bit transfer-count read - verify against the I2O reply layout.
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if(readl(reply+5) < cmd->underflow ) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* Everything else maps to a generic DID_ERROR. */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	/* Device status occupies the low byte of the result. */
	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
2529
2530
2531static s32 adpt_rescan(adpt_hba* pHba)
2532{
2533 s32 rcode;
2534 ulong flags = 0;
2535
2536 if(pHba->host)
2537 spin_lock_irqsave(pHba->host->host_lock, flags);
2538 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2539 goto out;
2540 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2541 goto out;
2542 rcode = 0;
2543out: if(pHba->host)
2544 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2545 return rcode;
2546}
2547
2548
/*
 * Walk a freshly fetched LCT and reconcile it with the driver's device
 * lists: add entries that appeared, re-enable entries that came back,
 * and mark devices missing from the table as offline.
 * Returns 0 on success, -1 for an empty LCT, or -ENOMEM.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* Number of LCT entries: (table_size - 3 header words) / 9 words each. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		/* Only entries claimed by the host (user_tid 0xfff) matter. */
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			/* Ask the device for its bus/id/lun coordinates. */
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			// NOTE(review): bus_no/scsi_id index channel[]/device[]
			// here before any range check - verify controller
			// values cannot exceed MAX_CHANNEL/array bounds.
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
					continue;
				}
				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					/* First LUN on this bus/id slot. */
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Append to the per-id LUN chain. */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2706
2707static void adpt_fail_posted_scbs(adpt_hba* pHba)
2708{
2709 struct scsi_cmnd* cmd = NULL;
2710 struct scsi_device* d = NULL;
2711
2712 shost_for_each_device(d, pHba->host) {
2713 unsigned long flags;
2714 spin_lock_irqsave(&d->list_lock, flags);
2715 list_for_each_entry(cmd, &d->cmd_list, list) {
2716 if(cmd->serial_number == 0){
2717 continue;
2718 }
2719 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2720 cmd->scsi_done(cmd);
2721 }
2722 spin_unlock_irqrestore(&d->list_lock, flags);
2723 }
2724}
2725
2726
2727/*============================================================================
2728 * Routines from i2o subsystem
2729 *============================================================================
2730 */
2731
2732
2733
/*
 * Bring an I2O controller into HOLD state.  See the I2O spec for the
 * IOP state machine; in short: make sure the IOP is in RESET (resetting
 * it if necessary), then set up the outbound queue and fetch the HRT,
 * which leaves it in HOLD.  Returns 0 on success, -1 or the reset
 * status on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Already initialised once: probe the status block first. */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any active/held state must go back through RESET first. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First activation: unconditionally reset the IOP. */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2788
2789/*
2790 * Bring a controller online into OPERATIONAL state.
2791 */
2792
2793static int adpt_i2o_online_hba(adpt_hba* pHba)
2794{
2795 if (adpt_i2o_systab_send(pHba) < 0) {
2796 adpt_i2o_delete_hba(pHba);
2797 return -1;
2798 }
2799 /* In READY state */
2800
2801 if (adpt_i2o_enable_hba(pHba) < 0) {
2802 adpt_i2o_delete_hba(pHba);
2803 return -1;
2804 }
2805
2806 /* In OPERATIONAL state */
2807 return 0;
2808}
2809
/*
 * Post a UTIL_NOP message using frame @m to hand the frame back to the
 * controller.  If @m is EMPTY_QUEUE, poll the post port (up to 5 s) for
 * a free frame first.  Returns 0 on success, 2 on timeout.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	/* Build the three-word NOP in the controller's message frame. */
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	/* Posting the frame address fires the message. */
	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2837
/*
 * Initialize the IOP's outbound (reply) message queue.
 *
 * Sends an EXEC_OUTBOUND_INIT request describing our reply-frame and
 * host page sizes, polls a 4-byte DMA-coherent status word until the
 * IOP reports completion, then (re)allocates the host reply pool and
 * hands every reply frame's bus address to the controller through
 * the reply port.  Returns 0 on success, -2 if the IOP rejected the
 * init, or a negative errno on timeout/allocation failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Claim an inbound message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte status word the IOP writes its progress/result into. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		/* Return the claimed frame so the IOP can reuse it. */
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);	/* SG entry pointing at the status word */

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Drop any previous reply pool before sizing a fresh one. */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Hand every reply frame's bus address to the controller. */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2934
2935
2936/*
2937 * I2O System Table. Contains information about
2938 * all the IOPs in the system. Used to inform IOPs
2939 * about each other's existence.
2940 *
2941 * sys_tbl_ver is the CurrentChangeIndicator that is
2942 * used by IOPs to track changes.
2943 */
2944
2945
2946
/*
 * Fetch the IOP status block into pHba->status_block (allocated in
 * DMA-coherent memory on first use) and derive the inbound/outbound
 * FIFO sizes and scatter/gather table size from it.
 * Returns 0 on success or a negative errno on timeout / allocation
 * failure.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Claim an inbound message frame from the post port. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);

	
	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* STATUS_GET request: the IOP DMAs the block to status_block_pa. */
	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* The IOP sets the last byte (87) of the 88-byte block to 0xff
	 * when the DMA is complete; poll it until then. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	/* 64-bit DMA needs an extra u32 per SG element and a larger
	 * fixed message header (14 vs 12 words). */
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		    - 14 * sizeof(u32))
		   / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		    - 12 * sizeof(u32))
		   / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
3067
3068/*
3069 * Get the IOP's Logical Configuration Table
3070 */
3071static int adpt_i2o_lct_get(adpt_hba* pHba)
3072{
3073 u32 msg[8];
3074 int ret;
3075 u32 buf[16];
3076
3077 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3078 pHba->lct_size = pHba->status_block->expected_lct_size;
3079 }
3080 do {
3081 if (pHba->lct == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003082 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3083 pHba->lct_size, &pHba->lct_pa,
3084 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 if(pHba->lct == NULL) {
3086 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3087 pHba->name);
3088 return -ENOMEM;
3089 }
3090 }
3091 memset(pHba->lct, 0, pHba->lct_size);
3092
3093 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3094 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3095 msg[2] = 0;
3096 msg[3] = 0;
3097 msg[4] = 0xFFFFFFFF; /* All devices */
3098 msg[5] = 0x00000000; /* Report now */
3099 msg[6] = 0xD0000000|pHba->lct_size;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003100 msg[7] = (u32)pHba->lct_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101
3102 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3103 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3104 pHba->name, ret);
3105 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3106 return ret;
3107 }
3108
3109 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3110 pHba->lct_size = pHba->lct->table_size << 2;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003111 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3112 pHba->lct, pHba->lct_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 pHba->lct = NULL;
3114 }
3115 } while (pHba->lct == NULL);
3116
3117 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3118
3119
3120 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3121 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3122 pHba->FwDebugBufferSize = buf[1];
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003123 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3124 pHba->FwDebugBufferSize);
3125 if (pHba->FwDebugBuffer_P) {
3126 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3127 FW_DEBUG_FLAGS_OFFSET;
3128 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3129 FW_DEBUG_BLED_OFFSET;
3130 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3131 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3132 FW_DEBUG_STR_LENGTH_OFFSET;
3133 pHba->FwDebugBuffer_P += buf[2];
3134 pHba->FwDebugFlags = 0;
3135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136 }
3137
3138 return 0;
3139}
3140
/*
 * Build (or rebuild) the global I2O system table describing every
 * IOP this driver manages, in DMA-coherent memory.  Any previous
 * table is freed first.  Controllers whose status block cannot be
 * refreshed are left out of the table.
 * Returns 0 on success or -ENOMEM.
 *
 * NOTE(review): uses hba_chain's first element's device for the DMA
 * allocations — assumes hba_chain is non-NULL when called; verify
 * against callers.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Bus address of this IOP's inbound post port (base + 0x40). */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
3202
3203
3204/*
3205 * Dump the information block associated with a given unit (TID)
3206 */
3207
/* Print a one-line identification for the unit: vendor, device and
 * revision strings read from parameter group 0xF100 of the given TID
 * (fields 3, 4 and 6 per the labels printed below).  DEBUG builds
 * also dump class, subclass and device flags. */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];	/* large enough for 16-byte fields + terminator */
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	/* Field strings come back unterminated; NUL-terminate by hand. */
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	 printk(KERN_INFO "\tFlags: ");

	 if(d->lct_data.device_flags&(1<<0))
		  printk("C");	     // ConfigDialog requested
	 if(d->lct_data.device_flags&(1<<1))
		  printk("U");	     // Multi-user capable
	 if(!(d->lct_data.device_flags&(1<<4)))
		  printk("P");	     // Peer service enabled!
	 if(!(d->lct_data.device_flags&(1<<5)))
		  printk("M");	     // Mgmt service enabled!
	 printk("\n");
#endif
}
3246
3247#ifdef DEBUG
3248/*
3249 * Do i2o class name lookup
3250 */
3251static const char *adpt_i2o_get_class_name(int class)
3252{
3253 int idx = 16;
3254 static char *i2o_class_name[] = {
3255 "Executive",
3256 "Device Driver Module",
3257 "Block Device",
3258 "Tape Device",
3259 "LAN Interface",
3260 "WAN Interface",
3261 "Fibre Channel Port",
3262 "Fibre Channel Device",
3263 "SCSI Device",
3264 "ATE Port",
3265 "ATE Device",
3266 "Floppy Controller",
3267 "Floppy Device",
3268 "Secondary Bus Port",
3269 "Peer Transport Agent",
3270 "Peer Transport",
3271 "Unknown"
3272 };
3273
3274 switch(class&0xFFF) {
3275 case I2O_CLASS_EXECUTIVE:
3276 idx = 0; break;
3277 case I2O_CLASS_DDM:
3278 idx = 1; break;
3279 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3280 idx = 2; break;
3281 case I2O_CLASS_SEQUENTIAL_STORAGE:
3282 idx = 3; break;
3283 case I2O_CLASS_LAN:
3284 idx = 4; break;
3285 case I2O_CLASS_WAN:
3286 idx = 5; break;
3287 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3288 idx = 6; break;
3289 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3290 idx = 7; break;
3291 case I2O_CLASS_SCSI_PERIPHERAL:
3292 idx = 8; break;
3293 case I2O_CLASS_ATE_PORT:
3294 idx = 9; break;
3295 case I2O_CLASS_ATE_PERIPHERAL:
3296 idx = 10; break;
3297 case I2O_CLASS_FLOPPY_CONTROLLER:
3298 idx = 11; break;
3299 case I2O_CLASS_FLOPPY_DEVICE:
3300 idx = 12; break;
3301 case I2O_CLASS_BUS_ADAPTER_PORT:
3302 idx = 13; break;
3303 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3304 idx = 14; break;
3305 case I2O_CLASS_PEER_TRANSPORT:
3306 idx = 15; break;
3307 }
3308 return i2o_class_name[idx];
3309}
3310#endif
3311
3312
/*
 * Read the Hardware Resource Table (HRT) from the IOP into
 * pHba->hrt, growing the DMA buffer and retrying whenever the IOP
 * reports a table larger than the current allocation.
 * Returns 0 on success, -ENOMEM or a post_wait status on failure.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);    /* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* Buffer too small for the full table: compute the new
		 * size first, free with the OLD allocation size, retry. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
3350
3351/*
3352 * Query one scalar group value or a whole scalar group.
3353 */
3354static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3355 int group, int field, void *buf, int buflen)
3356{
3357 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003358 u8 *opblk_va;
3359 dma_addr_t opblk_pa;
3360 u8 *resblk_va;
3361 dma_addr_t resblk_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
3363 int size;
3364
3365 /* 8 bytes for header */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003366 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3367 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3368 if (resblk_va == NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3370 return -ENOMEM;
3371 }
3372
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003373 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3374 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3375 if (opblk_va == NULL) {
3376 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3377 resblk_va, resblk_pa);
3378 printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3379 pHba->name);
3380 return -ENOMEM;
3381 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 if (field == -1) /* whole group */
3383 opblk[4] = -1;
3384
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003385 memcpy(opblk_va, opblk, sizeof(opblk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003387 opblk_va, opblk_pa, sizeof(opblk),
3388 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3389 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 if (size == -ETIME) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003391 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3392 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3394 return -ETIME;
3395 } else if (size == -EINTR) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003396 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3397 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3399 return -EINTR;
3400 }
3401
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003402 memcpy(buf, resblk_va+8, buflen); /* cut off header */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003404 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3405 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 if (size < 0)
3407 return size;
3408
3409 return buflen;
3410}
3411
3412
3413/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3414 *
3415 * This function can be used for all UtilParamsGet/Set operations.
3416 * The OperationBlock is given in opblk-buffer,
3417 * and results are returned in resblk-buffer.
3418 * Note that the minimum sized resblk is 8 bytes and contains
3419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3420 */
/*
 * Issue a UTIL_PARAMS_GET/SET message referencing the caller's
 * DMA-mapped operation block (@opblk_pa/@oplen) and result block
 * (@resblk_pa/@reslen), and wait for the reply.
 * Returns the number of result bytes used on success, or a negative
 * post_wait status / BlockStatus on failure.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
	void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
   		return wait_status; 	/* -DetailedStatus */
	}

	/* res[1] packs ErrorInfoSize / BlockStatus / BlockSize. */
	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
3456
3457
3458static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3459{
3460 u32 msg[4];
3461 int ret;
3462
3463 adpt_i2o_status_get(pHba);
3464
3465 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3466
3467 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3468 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3469 return 0;
3470 }
3471
3472 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3473 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3474 msg[2] = 0;
3475 msg[3] = 0;
3476
3477 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3478 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3479 pHba->unit, -ret);
3480 } else {
3481 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3482 }
3483
3484 adpt_i2o_status_get(pHba);
3485 return ret;
3486}
3487
3488
3489/*
3490 * Enable IOP. Allows the IOP to resume external operations.
3491 */
3492static int adpt_i2o_enable_hba(adpt_hba* pHba)
3493{
3494 u32 msg[4];
3495 int ret;
3496
3497 adpt_i2o_status_get(pHba);
3498 if(!pHba->status_block){
3499 return -ENOMEM;
3500 }
3501 /* Enable only allowed on READY state */
3502 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3503 return 0;
3504
3505 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3506 return -EINVAL;
3507
3508 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3509 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3510 msg[2]= 0;
3511 msg[3]= 0;
3512
3513 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3514 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3515 pHba->name, ret);
3516 } else {
3517 PDEBUG("%s: Enabled.\n", pHba->name);
3518 }
3519
3520 adpt_i2o_status_get(pHba);
3521 return ret;
3522}
3523
3524
3525static int adpt_i2o_systab_send(adpt_hba* pHba)
3526{
3527 u32 msg[12];
3528 int ret;
3529
3530 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3531 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3532 msg[2] = 0;
3533 msg[3] = 0;
3534 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3535 msg[5] = 0; /* Segment 0 */
3536
3537 /*
3538 * Provide three SGL-elements:
3539 * System table (SysTab), Private memory space declaration and
3540 * Private i/o space declaration
3541 */
3542 msg[6] = 0x54000000 | sys_tbl_len;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003543 msg[7] = (u32)sys_tbl_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 msg[8] = 0x54000000 | 0;
3545 msg[9] = 0;
3546 msg[10] = 0xD4000000 | 0;
3547 msg[11] = 0;
3548
3549 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3550 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3551 pHba->name, ret);
3552 }
3553#ifdef DEBUG
3554 else {
3555 PINFO("%s: SysTab set.\n", pHba->name);
3556 }
3557#endif
3558
3559 return ret;
3560 }
3561
3562
3563/*============================================================================
3564 *
3565 *============================================================================
3566 */
3567
3568
3569#ifdef UARTDELAY
3570
/*
 * Busy-wait for approximately @millisec milliseconds.
 * Only compiled when UARTDELAY is defined (debug-UART pacing).
 * Fix: the declaration had a duplicated storage-class specifier
 * ("static static"), which is invalid C.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3578
3579#endif
3580
/* SCSI mid-layer host template: wires the generic SCSI entry points
 * to this driver's adpt_* implementations. */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	/* Error-recovery escalation chain: abort -> device -> bus -> host. */
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,	/* bounded by inbound frame count */
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02003599
3600static int __init adpt_init(void)
3601{
3602 int error;
3603 adpt_hba *pHba, *next;
3604
3605 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3606
3607 error = adpt_detect(&driver_template);
3608 if (error < 0)
3609 return error;
3610 if (hba_chain == NULL)
3611 return -ENODEV;
3612
3613 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3614 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3615 if (error)
3616 goto fail;
3617 scsi_scan_host(pHba->host);
3618 }
3619 return 0;
3620fail:
3621 for (pHba = hba_chain; pHba; pHba = next) {
3622 next = pHba->next;
3623 scsi_remove_host(pHba->host);
3624 }
3625 return error;
3626}
3627
3628static void __exit adpt_exit(void)
3629{
3630 adpt_hba *pHba, *next;
3631
3632 for (pHba = hba_chain; pHba; pHba = pHba->next)
3633 scsi_remove_host(pHba->host);
3634 for (pHba = hba_chain; pHba; pHba = next) {
3635 next = pHba->next;
3636 adpt_release(pHba->host);
3637 }
3638}
3639
/* Standard module load/unload hooks. */
module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");