/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
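/*
 * Presumably the management tools locate this structure by scanning the
 * driver image for the 'dPtSiG' magic below; the remaining fields advertise
 * the processor family, driver type, version and capability flags.
 */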
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
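/*
 * Exporting the ID table lets the module tools match these Adaptec/DPT
 * PCI IDs and autoload the driver when such a controller is present.
 */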
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba* pHba;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8  scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First lets set the default geometry ****

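	/*
	 * Summary of the mapping below (capacity is in 512-byte sectors):
	 *   < 0x2000              -> 18 heads,   2 sectors (floppy)
	 *   0x2000  - 0x1ffff     -> 64 heads,  32 sectors
	 *   0x20000 - 0x3ffff     -> 65 heads,  63 sectors
	 *   0x40000 - 0x7ffff     -> 128 heads, 63 sectors
	 *   >= 0x80000            -> 255 heads, 63 sectors
	 * Cylinders then follow from dividing the capacity by heads*sectors.
	 */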
	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(&pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
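
/*
 * Note: the indirection through pHba->ioctl_reply_context[] above exists
 * because an I2O transaction context is only 32 bits wide, so on 64-bit
 * kernels a reply-buffer pointer cannot be stored in the message directly;
 * a small slot index is handed to the firmware instead and translated back
 * here when the reply arrives.
 */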

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);

	// destroy the sysfs node before pHba is freed; pHba->unit is used here
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


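/*
 * Synchronous message post: the caller's wait descriptor is placed on
 * adpt_post_wait_queue and its id is folded into msg[2] (the transaction
 * context).  When the reply arrives, adpt_i2o_post_wait_complete() looks the
 * id up again and wakes the sleeper; -ETIMEDOUT is returned if no reply
 * shows up within 'timeout' seconds.
 */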
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	lock_kernel();
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		unlock_kernel();
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1741 if (pHba->unit == minor) {
1742 break; /* found adapter */
1743 }
1744 }
1745 if (pHba == NULL) {
Arjan van de Ven0b950672006-01-11 13:16:10 +01001746 mutex_unlock(&adpt_configuration_lock);
Jonathan Corbetdea3f662008-05-16 14:11:09 -06001747 unlock_kernel();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return -ENXIO;
1749 }
1750
1751// if(pHba->in_use){
Arjan van de Ven0b950672006-01-11 13:16:10 +01001752 // mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753// return -EBUSY;
1754// }
1755
1756 pHba->in_use = 1;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001757 mutex_unlock(&adpt_configuration_lock);
Jonathan Corbetdea3f662008-05-16 14:11:09 -06001758 unlock_kernel();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760 return 0;
1761}
1762
1763static int adpt_close(struct inode *inode, struct file *file)
1764{
1765 int minor;
1766 adpt_hba* pHba;
1767
1768 minor = iminor(inode);
1769 if (minor >= hba_count) {
1770 return -ENXIO;
1771 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001772 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1774 if (pHba->unit == minor) {
1775 break; /* found adapter */
1776 }
1777 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001778 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 if (pHba == NULL) {
1780 return -ENXIO;
1781 }
1782
1783 pHba->in_use = 0;
1784
1785 return 0;
1786}
1787
1788
1789static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1790{
1791 u32 msg[MAX_MESSAGE_SIZE];
1792 u32* reply = NULL;
1793 u32 size = 0;
1794 u32 reply_size = 0;
1795 u32 __user *user_msg = arg;
1796 u32 __user * user_reply = NULL;
1797 void *sg_list[pHba->sg_tablesize];
1798 u32 sg_offset = 0;
1799 u32 sg_count = 0;
1800 int sg_index = 0;
1801 u32 i = 0;
1802 u32 rcode = 0;
1803 void *p = NULL;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001804 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 ulong flags = 0;
1806
1807 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1808 // get user msg size in u32s
1809 if(get_user(size, &user_msg[0])){
1810 return -EFAULT;
1811 }
1812 size = size>>16;
1813
1814 user_reply = &user_msg[size];
1815 if(size > MAX_MESSAGE_SIZE){
1816 return -EFAULT;
1817 }
1818 size *= 4; // Convert to bytes
1819
1820 /* Copy in the user's I2O command */
1821 if(copy_from_user(msg, user_msg, size)) {
1822 return -EFAULT;
1823 }
1824 get_user(reply_size, &user_reply[0]);
1825 reply_size = reply_size>>16;
1826 if(reply_size > REPLY_FRAME_SIZE){
1827 reply_size = REPLY_FRAME_SIZE;
1828 }
1829 reply_size *= 4;
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301830 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 if(reply == NULL) {
1832 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1833 return -ENOMEM;
1834 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 sg_offset = (msg[0]>>4)&0xf;
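	/*
	 * Tag the request so the interrupt handler can tell it apart from
	 * normal SCSI completions: bit 30 of the initiator context marks an
	 * ioctl reply, and msg[3] carries a handle that
	 * adpt_ioctl_from_context() later maps back to our reply buffer.
	 */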
1836 msg[2] = 0x40000000; // IOCTL context
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001837 msg[3] = adpt_ioctl_to_context(pHba, reply);
1838	if (msg[3] == (u32)-1) {
		kfree(reply);	/* don't leak the reply buffer on this error path */
1839		return -EBUSY;
	}
1840
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1842 if(sg_offset) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001843 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1845 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1846 if (sg_count > pHba->sg_tablesize){
1847 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1848 kfree (reply);
1849 return -EINVAL;
1850 }
1851
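		/*
		 * Each sg_simple_element encodes the byte count in the low 24
		 * bits of flag_count; 0x10000000 marks a simple address element
		 * and 0x04000000 indicates data flowing from the host to the
		 * controller (so the user buffer must be copied in first).
		 */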
1852 for(i = 0; i < sg_count; i++) {
1853 int sg_size;
1854
1855 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1856 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1857 rcode = -EINVAL;
1858 goto cleanup;
1859 }
1860 sg_size = sg[i].flag_count & 0xffffff;
1861 /* Allocate memory for the transfer */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001862 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 if(!p) {
1864 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1865 pHba->name,sg_size,i,sg_count);
1866 rcode = -ENOMEM;
1867 goto cleanup;
1868 }
1869 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1870 /* Copy in the user's SG buffer if necessary */
1871 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001872 // sg_simple_element API is 32 bit
1873 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1875 rcode = -EFAULT;
1876 goto cleanup;
1877 }
1878 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001879 /* sg_simple_element API is 32 bit, but addr < 4GB */
1880 sg[i].addr_bus = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 }
1882 }
1883
1884 do {
1885 if(pHba->host)
1886 spin_lock_irqsave(pHba->host->host_lock, flags);
1887	// This state stops any new commands from entering the
1888 // controller while processing the ioctl
1889// pHba->state |= DPTI_STATE_IOCTL;
1890// We can't set this now - The scsi subsystem sets host_blocked and
1891// the queue empties and stops. We need a way to restart the queue
1892 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1893 if (rcode != 0)
1894			printk(KERN_WARNING "adpt_i2o_passthru: post wait failed %d %p\n",
1895 rcode, reply);
1896// pHba->state &= ~DPTI_STATE_IOCTL;
1897 if(pHba->host)
1898 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1899 } while(rcode == -ETIMEDOUT);
1900
1901 if(rcode){
1902 goto cleanup;
1903 }
1904
1905 if(sg_offset) {
1906 /* Copy back the Scatter Gather buffers back to user space */
1907 u32 j;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001908 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 struct sg_simple_element* sg;
1910 int sg_size;
1911
1912 // re-acquire the original message to handle correctly the sg copy operation
1913		// re-read the original user message so the SG copy-back below uses the caller's addresses
1914 // get user msg size in u32s
1915 if(get_user(size, &user_msg[0])){
1916 rcode = -EFAULT;
1917 goto cleanup;
1918 }
1919 size = size>>16;
1920 size *= 4;
Alan Coxef7562b2009-10-27 15:35:35 +00001921 if (size > MAX_MESSAGE_SIZE) {
OGAWA Hirofumiaefba412009-10-30 17:02:31 +09001922 rcode = -EINVAL;
Alan Coxef7562b2009-10-27 15:35:35 +00001923 goto cleanup;
1924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 /* Copy in the user's I2O command */
1926 if (copy_from_user (msg, user_msg, size)) {
1927 rcode = -EFAULT;
1928 goto cleanup;
1929 }
1930 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1931
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001932 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 sg = (struct sg_simple_element*)(msg + sg_offset);
1934 for (j = 0; j < sg_count; j++) {
1935 /* Copy out the SG list to user's buffer if necessary */
1936 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1937 sg_size = sg[j].flag_count & 0xffffff;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001938 // sg_simple_element API is 32 bit
1939 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1941 rcode = -EFAULT;
1942 goto cleanup;
1943 }
1944 }
1945 }
1946 }
1947
1948 /* Copy back the reply to user space */
1949 if (reply_size) {
1950 // we wrote our own values for context - now restore the user supplied ones
1951 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1952 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1953 rcode = -EFAULT;
1954 }
1955 if(copy_to_user(user_reply, reply, reply_size)) {
1956 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1957 rcode = -EFAULT;
1958 }
1959 }
1960
1961
1962cleanup:
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001963 if (rcode != -ETIME && rcode != -EINTR) {
1964 struct sg_simple_element *sg =
1965 (struct sg_simple_element*) (msg +sg_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 kfree (reply);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001967 while(sg_index) {
1968 if(sg_list[--sg_index]) {
1969 dma_free_coherent(&pHba->pDev->dev,
1970 sg[sg_index].flag_count & 0xffffff,
1971 sg_list[sg_index],
1972 sg[sg_index].addr_bus);
1973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 }
1975 }
1976 return rcode;
1977}
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979#if defined __ia64__
1980static void adpt_ia64_info(sysInfo_S* si)
1981{
1982 // This is all the info we need for now
1983 // We will add more info as our new
1984	// management utility requires it
1985 si->processorType = PROC_IA64;
1986}
1987#endif
1988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989#if defined __sparc__
1990static void adpt_sparc_info(sysInfo_S* si)
1991{
1992 // This is all the info we need for now
1993 // We will add more info as our new
1994	// management utility requires it
1995 si->processorType = PROC_ULTRASPARC;
1996}
1997#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998#if defined __alpha__
1999static void adpt_alpha_info(sysInfo_S* si)
2000{
2001 // This is all the info we need for now
2002 // We will add more info as our new
2003	// management utility requires it
2004 si->processorType = PROC_ALPHA;
2005}
2006#endif
2007
2008#if defined __i386__
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009static void adpt_i386_info(sysInfo_S* si)
2010{
2011 // This is all the info we need for now
2012 // We will add more info as our new
2013	// management utility requires it
2014 switch (boot_cpu_data.x86) {
2015 case CPU_386:
2016 si->processorType = PROC_386;
2017 break;
2018 case CPU_486:
2019 si->processorType = PROC_486;
2020 break;
2021 case CPU_586:
2022 si->processorType = PROC_PENTIUM;
2023 break;
2024 default: // Just in case
2025 si->processorType = PROC_PENTIUM;
2026 break;
2027 }
2028}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029#endif
2030
Andrew Morton8b2cc912008-05-06 20:42:42 -07002031/*
2032 * This routine returns information about the system. This does not affect
2033 * any logic and if the info is wrong - it doesn't matter.
2034 */
2035
2036/* Get all the info we cannot get from kernel services */
2037static int adpt_system_info(void __user *buffer)
2038{
2039 sysInfo_S si;
2040
2041 memset(&si, 0, sizeof(si));
2042
2043 si.osType = OS_LINUX;
2044 si.osMajorVersion = 0;
2045 si.osMinorVersion = 0;
2046 si.osRevision = 0;
2047 si.busType = SI_PCI_BUS;
2048 si.processorFamily = DPTI_sig.dsProcessorFamily;
2049
2050#if defined __i386__
2051 adpt_i386_info(&si);
2052#elif defined (__ia64__)
2053 adpt_ia64_info(&si);
2054#elif defined(__sparc__)
2055 adpt_sparc_info(&si);
2056#elif defined (__alpha__)
2057 adpt_alpha_info(&si);
2058#else
2059 si.processorType = 0xff ;
2060#endif
2061 if (copy_to_user(buffer, &si, sizeof(si))){
2062 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2063 return -EFAULT;
2064 }
2065
2066 return 0;
2067}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
2069static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2070 ulong arg)
2071{
2072 int minor;
2073 int error = 0;
2074 adpt_hba* pHba;
2075 ulong flags = 0;
2076 void __user *argp = (void __user *)arg;
2077
2078 minor = iminor(inode);
2079 if (minor >= DPTI_MAX_HBA){
2080 return -ENXIO;
2081 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002082 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2084 if (pHba->unit == minor) {
2085 break; /* found adapter */
2086 }
2087 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002088 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 if(pHba == NULL){
2090 return -ENXIO;
2091 }
2092
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002093 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2094 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096 switch (cmd) {
2097 // TODO: handle 3 cases
2098 case DPT_SIGNATURE:
2099 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2100 return -EFAULT;
2101 }
2102 break;
2103 case I2OUSRCMD:
2104 return adpt_i2o_passthru(pHba, argp);
2105
2106 case DPT_CTRLINFO:{
2107 drvrHBAinfo_S HbaInfo;
2108
2109#define FLG_OSD_PCI_VALID 0x0001
2110#define FLG_OSD_DMA 0x0002
2111#define FLG_OSD_I2O 0x0004
2112 memset(&HbaInfo, 0, sizeof(HbaInfo));
2113 HbaInfo.drvrHBAnum = pHba->unit;
2114 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2115 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2116 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2117 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2118 HbaInfo.Interrupt = pHba->pDev->irq;
2119 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2120 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2121 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2122 return -EFAULT;
2123 }
2124 break;
2125 }
2126 case DPT_SYSINFO:
2127 return adpt_system_info(argp);
2128 case DPT_BLINKLED:{
2129 u32 value;
2130 value = (u32)adpt_read_blink_led(pHba);
2131 if (copy_to_user(argp, &value, sizeof(value))) {
2132 return -EFAULT;
2133 }
2134 break;
2135 }
2136 case I2ORESETCMD:
2137 if(pHba->host)
2138 spin_lock_irqsave(pHba->host->host_lock, flags);
2139 adpt_hba_reset(pHba);
2140 if(pHba->host)
2141 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2142 break;
2143 case I2ORESCANCMD:
2144 adpt_rescan(pHba);
2145 break;
2146 default:
2147 return -EINVAL;
2148 }
2149
2150 return error;
2151}
2152
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002153#ifdef CONFIG_COMPAT
2154static long compat_adpt_ioctl(struct file *file,
2155 unsigned int cmd, unsigned long arg)
2156{
2157 struct inode *inode;
2158 long ret;
2159
2160 inode = file->f_dentry->d_inode;
2161
2162 lock_kernel();
2163
2164 switch(cmd) {
2165 case DPT_SIGNATURE:
2166 case I2OUSRCMD:
2167 case DPT_CTRLINFO:
2168 case DPT_SYSINFO:
2169 case DPT_BLINKLED:
2170 case I2ORESETCMD:
2171 case I2ORESCANCMD:
2172 case (DPT_TARGET_BUSY & 0xFFFF):
2173 case DPT_TARGET_BUSY:
2174 ret = adpt_ioctl(inode, file, cmd, arg);
2175 break;
2176 default:
2177 ret = -ENOIOCTLCMD;
2178 }
2179
2180 unlock_kernel();
2181
2182 return ret;
2183}
2184#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
David Howells7d12e782006-10-05 14:55:46 +01002186static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187{
2188 struct scsi_cmnd* cmd;
2189 adpt_hba* pHba = dev_id;
2190 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002191 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 u32 status=0;
2193 u32 context;
2194 ulong flags = 0;
2195 int handled = 0;
2196
2197 if (pHba == NULL){
2198 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2199 return IRQ_NONE;
2200 }
2201 if(pHba->host)
2202 spin_lock_irqsave(pHba->host->host_lock, flags);
2203
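	/*
	 * The controller posts the bus address of each completed reply frame
	 * to the outbound FIFO; translate that address back into a pointer
	 * within our coherent reply pool before parsing the message.
	 */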
2204 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2205 m = readl(pHba->reply_port);
2206 if(m == EMPTY_QUEUE){
2207 // Try twice then give up
2208 rmb();
2209 m = readl(pHba->reply_port);
2210 if(m == EMPTY_QUEUE){
2211 // This really should not happen
2212 printk(KERN_ERR"dpti: Could not get reply frame\n");
2213 goto out;
2214 }
2215 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002216 if (pHba->reply_pool_pa <= m &&
2217 m < pHba->reply_pool_pa +
2218 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2219 reply = (u8 *)pHba->reply_pool +
2220 (m - pHba->reply_pool_pa);
2221 } else {
2222 /* Ick, we should *never* be here */
2223 printk(KERN_ERR "dpti: reply frame not from pool\n");
2224 reply = (u8 *)bus_to_virt(m);
2225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226
2227 if (readl(reply) & MSG_FAIL) {
2228 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002229 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 u32 old_context;
2231 PDEBUG("%s: Failed message\n",pHba->name);
2232 if(old_m >= 0x100000){
2233 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2234 writel(m,pHba->reply_port);
2235 continue;
2236 }
2237 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002238 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 old_context = readl(msg+12);
2240 writel(old_context, reply+12);
2241 adpt_send_nop(pHba, old_m);
2242 }
2243 context = readl(reply+8);
2244 if(context & 0x40000000){ // IOCTL
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002245 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002246 if( p != NULL) {
2247 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 }
2249 // All IOCTLs will also be post wait
2250 }
2251 if(context & 0x80000000){ // Post wait message
2252 status = readl(reply+16);
2253 if(status >> 24){
2254 status &= 0xffff; /* Get detail status */
2255 } else {
2256 status = I2O_POST_WAIT_OK;
2257 }
2258 if(!(context & 0x40000000)) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002259 cmd = adpt_cmd_from_context(pHba,
2260 readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 if(cmd != NULL) {
2262 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2263 }
2264 }
2265 adpt_i2o_post_wait_complete(context, status);
2266 } else { // SCSI message
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002267 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 if(cmd != NULL){
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002269 scsi_dma_unmap(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 if(cmd->serial_number != 0) { // If not timedout
2271 adpt_i2o_to_scsi(reply, cmd);
2272 }
2273 }
2274 }
2275 writel(m, pHba->reply_port);
2276 wmb();
2277 rmb();
2278 }
2279 handled = 1;
2280out: if(pHba->host)
2281 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2282 return IRQ_RETVAL(handled);
2283}
2284
2285static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2286{
2287 int i;
2288 u32 msg[MAX_MESSAGE_SIZE];
2289 u32* mptr;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002290 u32* lptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 u32 *lenptr;
2292 int direction;
2293 int scsidir;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002294 int nseg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 u32 len;
2296 u32 reqlen;
2297 s32 rcode;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002298 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
2300 memset(msg, 0 , sizeof(msg));
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002301 len = scsi_bufflen(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 direction = 0x00000000;
2303
2304 scsidir = 0x00000000; // DATA NO XFER
2305 if(len) {
2306 /*
2307 * Set SCBFlags to indicate if data is being transferred
2308 * in or out, or no data transfer
2309 * Note: Do not have to verify index is less than 0 since
2310 * cmd->cmnd[0] is an unsigned char
2311 */
2312 switch(cmd->sc_data_direction){
2313 case DMA_FROM_DEVICE:
2314 scsidir =0x40000000; // DATA IN (iop<--dev)
2315 break;
2316 case DMA_TO_DEVICE:
2317 direction=0x04000000; // SGL OUT
2318 scsidir =0x80000000; // DATA OUT (iop-->dev)
2319 break;
2320 case DMA_NONE:
2321 break;
2322 case DMA_BIDIRECTIONAL:
2323 scsidir =0x40000000; // DATA IN (iop<--dev)
2324 // Assume In - and continue;
2325 break;
2326 default:
2327 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2328 pHba->name, cmd->cmnd[0]);
2329 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2330 cmd->scsi_done(cmd);
2331 return 0;
2332 }
2333 }
2334 // msg[0] is set later
2335 // I2O_CMD_SCSI_EXEC
2336 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2337 msg[2] = 0;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002338 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 // Our cards use the transaction context as the tag for queueing
2340 // Adaptec/DPT Private stuff
2341 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2342 msg[5] = d->tid;
2343 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2344 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2345 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2346 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2347 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2348
2349 mptr=msg+7;
2350
2351 // Write SCSI command into the message - always 16 byte block
2352 memset(mptr, 0, 16);
2353 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2354 mptr+=4;
2355 lenptr=mptr++; /* Remember me - fill in when we know */
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002356 if (dpt_dma64(pHba)) {
2357 reqlen = 16; // SINGLE SGE
2358 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2359 *mptr++ = 1 << PAGE_SHIFT;
2360 } else {
2361 reqlen = 14; // SINGLE SGE
2362 }
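	/*
	 * On DMA64-capable boards the two extra words written above enable
	 * 64-bit SG addressing, and every scatter/gather entry below carries
	 * an additional word holding the upper half of the bus address.
	 */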
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 /* Now fill in the SGList and command */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002365 nseg = scsi_dma_map(cmd);
2366 BUG_ON(nseg < 0);
2367 if (nseg) {
2368 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
2370 len = 0;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002371 scsi_for_each_sg(cmd, sg, nseg, i) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002372 lptr = mptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2374 len+=sg_dma_len(sg);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002375 addr = sg_dma_address(sg);
2376 *mptr++ = dma_low(addr);
2377 if (dpt_dma64(pHba))
2378 *mptr++ = dma_high(addr);
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002379 /* Make this an end of list */
2380 if (i == nseg - 1)
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002381 *lptr = direction|0xD0000000|sg_dma_len(sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 reqlen = mptr - msg;
2384 *lenptr = len;
2385
2386 if(cmd->underflow && len != cmd->underflow){
2387 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2388 len, cmd->underflow);
2389 }
2390 } else {
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002391 *lenptr = len = 0;
2392 reqlen = 12;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 }
2394
2395 /* Stick the headers on */
2396 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2397
2398	// Send it on its way
2399 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2400 if (rcode == 0) {
2401 return 0;
2402 }
2403 return rcode;
2404}
2405
2406
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002407static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
Andrew Morton24601bb2007-12-10 15:49:20 -08002408{
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002409 struct Scsi_Host *host;
Andrew Morton24601bb2007-12-10 15:49:20 -08002410
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002411 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
Andrew Morton24601bb2007-12-10 15:49:20 -08002412 if (host == NULL) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002413 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
Andrew Morton24601bb2007-12-10 15:49:20 -08002414 return -1;
2415 }
2416 host->hostdata[0] = (unsigned long)pHba;
2417 pHba->host = host;
2418
2419 host->irq = pHba->pDev->irq;
2420 /* no IO ports, so don't have to set host->io_port and
2421 * host->n_io_port
2422 */
2423 host->io_port = 0;
2424 host->n_io_port = 0;
2425 /* see comments in scsi_host.h */
2426 host->max_id = 16;
2427 host->max_lun = 256;
2428 host->max_channel = pHba->top_scsi_channel + 1;
2429 host->cmd_per_lun = 1;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002430 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
Andrew Morton24601bb2007-12-10 15:49:20 -08002431 host->sg_tablesize = pHba->sg_tablesize;
2432 host->can_queue = pHba->post_fifo_size;
2433
2434 return 0;
2435}
2436
2437
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002438static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439{
2440 adpt_hba* pHba;
2441 u32 hba_status;
2442 u32 dev_status;
2443 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2444 // I know this would look cleaner if I just read bytes
2445 // but the model I have been using for all the rest of the
2446 // io is in 4 byte words - so I keep that model
2447 u16 detailed_status = readl(reply+16) &0xffff;
2448 dev_status = (detailed_status & 0xff);
2449 hba_status = detailed_status >> 8;
2450
2451 // calculate resid for sg
Miquel van Smoorenburgdf81d232008-11-05 00:09:12 +01002452 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
2454 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2455
2456 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2457
2458 if(!(reply_flags & MSG_FAIL)) {
2459 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2460 case I2O_SCSI_DSC_SUCCESS:
2461 cmd->result = (DID_OK << 16);
2462 // handle underflow
Miquel van Smoorenburgdf81d232008-11-05 00:09:12 +01002463 if (readl(reply+20) < cmd->underflow) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 cmd->result = (DID_ERROR <<16);
2465 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2466 }
2467 break;
2468 case I2O_SCSI_DSC_REQUEST_ABORTED:
2469 cmd->result = (DID_ABORT << 16);
2470 break;
2471 case I2O_SCSI_DSC_PATH_INVALID:
2472 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2473 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2474 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2475 case I2O_SCSI_DSC_NO_ADAPTER:
2476 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2477 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2478 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2479 cmd->result = (DID_TIME_OUT << 16);
2480 break;
2481 case I2O_SCSI_DSC_ADAPTER_BUSY:
2482 case I2O_SCSI_DSC_BUS_BUSY:
2483 cmd->result = (DID_BUS_BUSY << 16);
2484 break;
2485 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2486 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2487 cmd->result = (DID_RESET << 16);
2488 break;
2489 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2490 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2491 cmd->result = (DID_PARITY << 16);
2492 break;
2493 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2494 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2495 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2496 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2497 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2498 case I2O_SCSI_DSC_DATA_OVERRUN:
2499 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2500 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2501 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2502 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2503 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2504 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2505 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2506 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2507 case I2O_SCSI_DSC_INVALID_CDB:
2508 case I2O_SCSI_DSC_LUN_INVALID:
2509 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2510 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2511 case I2O_SCSI_DSC_NO_NEXUS:
2512 case I2O_SCSI_DSC_CDB_RECEIVED:
2513 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2514 case I2O_SCSI_DSC_QUEUE_FROZEN:
2515 case I2O_SCSI_DSC_REQUEST_INVALID:
2516 default:
2517 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2518 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2519 hba_status, dev_status, cmd->cmnd[0]);
2520 cmd->result = (DID_ERROR << 16);
2521 break;
2522 }
2523
2524 // copy over the request sense data if it was a check
2525 // condition status
Salyzyn, Markd814c512008-01-14 11:04:40 -08002526 if (dev_status == SAM_STAT_CHECK_CONDITION) {
FUJITA Tomonorib80ca4f2008-01-13 15:46:13 +09002527 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002529 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2531 cmd->sense_buffer[2] == DATA_PROTECT ){
2532 /* This is to handle an array failed */
2533 cmd->result = (DID_TIME_OUT << 16);
2534 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2535 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2536 hba_status, dev_status, cmd->cmnd[0]);
2537
2538 }
2539 }
2540 } else {
2541		/* In this condition we could not talk to the tid -
2542		 * the card rejected it. We should signal a retry
2543		 * for a limited number of retries.
2544 */
2545 cmd->result = (DID_TIME_OUT << 16);
2546 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2547 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2548 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2549 }
2550
2551 cmd->result |= (dev_status);
2552
2553 if(cmd->scsi_done != NULL){
2554 cmd->scsi_done(cmd);
2555 }
2556 return cmd->result;
2557}
2558
2559
2560static s32 adpt_rescan(adpt_hba* pHba)
2561{
2562 s32 rcode;
2563 ulong flags = 0;
2564
2565 if(pHba->host)
2566 spin_lock_irqsave(pHba->host->host_lock, flags);
2567 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2568 goto out;
2569 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2570 goto out;
2571 rcode = 0;
2572out: if(pHba->host)
2573 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2574 return rcode;
2575}
2576
2577
2578static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2579{
2580 int i;
2581 int max;
2582 int tid;
2583 struct i2o_device *d;
2584 i2o_lct *lct = pHba->lct;
2585 u8 bus_no = 0;
2586 s16 scsi_id;
2587 s16 scsi_lun;
2588 u32 buf[10]; // at least 8 u32's
2589 struct adpt_device* pDev = NULL;
2590 struct i2o_device* pI2o_dev = NULL;
2591
2592 if (lct == NULL) {
2593 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2594 return -1;
2595 }
2596
2597 max = lct->table_size;
2598 max -= 3;
2599 max /= 9;
2600
2601 // Mark each drive as unscanned
2602 for (d = pHba->devices; d; d = d->next) {
2603 pDev =(struct adpt_device*) d->owner;
2604 if(!pDev){
2605 continue;
2606 }
2607 pDev->state |= DPTI_DEV_UNSCANNED;
2608 }
2609
2610 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2611
2612 for(i=0;i<max;i++) {
2613 if( lct->lct_entry[i].user_tid != 0xfff){
2614 continue;
2615 }
2616
2617 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2618 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2619 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2620 tid = lct->lct_entry[i].tid;
2621 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2622 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2623 continue;
2624 }
2625 bus_no = buf[0]>>16;
2626 scsi_id = buf[1];
2627 scsi_lun = (buf[2]>>8 )&0xff;
2628 pDev = pHba->channel[bus_no].device[scsi_id];
2629			/* find the matching LUN on this target */
2630 while(pDev) {
2631 if(pDev->scsi_lun == scsi_lun) {
2632 break;
2633 }
2634 pDev = pDev->next_lun;
2635 }
2636 if(!pDev ) { // Something new add it
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002637 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 if(d==NULL)
2639 {
2640 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2641 return -ENOMEM;
2642 }
2643
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002644 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 d->next = NULL;
2646
2647 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2648
2649 d->flags = 0;
2650 adpt_i2o_report_hba_unit(pHba, d);
2651 adpt_i2o_install_device(pHba, d);
2652
2653 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2654 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2655 continue;
2656 }
2657 pDev = pHba->channel[bus_no].device[scsi_id];
2658 if( pDev == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302659 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 if(pDev == NULL) {
2661 return -ENOMEM;
2662 }
2663 pHba->channel[bus_no].device[scsi_id] = pDev;
2664 } else {
2665 while (pDev->next_lun) {
2666 pDev = pDev->next_lun;
2667 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302668 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 if(pDev == NULL) {
2670 return -ENOMEM;
2671 }
2672 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 pDev->tid = d->lct_data.tid;
2674 pDev->scsi_channel = bus_no;
2675 pDev->scsi_id = scsi_id;
2676 pDev->scsi_lun = scsi_lun;
2677 pDev->pI2o_dev = d;
2678 d->owner = pDev;
2679 pDev->type = (buf[0])&0xff;
2680 pDev->flags = (buf[0]>>8)&0xff;
2681				// Too late, the SCSI system has made up its mind, but what the hey ...
2682 if(scsi_id > pHba->top_scsi_id){
2683 pHba->top_scsi_id = scsi_id;
2684 }
2685 if(scsi_lun > pHba->top_scsi_lun){
2686 pHba->top_scsi_lun = scsi_lun;
2687 }
2688 continue;
2689 } // end of new i2o device
2690
2691 // We found an old device - check it
2692 while(pDev) {
2693 if(pDev->scsi_lun == scsi_lun) {
2694 if(!scsi_device_online(pDev->pScsi_dev)) {
2695 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2696 pHba->name,bus_no,scsi_id,scsi_lun);
2697 if (pDev->pScsi_dev) {
2698 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2699 }
2700 }
2701 d = pDev->pI2o_dev;
2702 if(d->lct_data.tid != tid) { // something changed
2703 pDev->tid = tid;
2704 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2705 if (pDev->pScsi_dev) {
2706 pDev->pScsi_dev->changed = TRUE;
2707 pDev->pScsi_dev->removable = TRUE;
2708 }
2709 }
2710 // Found it - mark it scanned
2711 pDev->state = DPTI_DEV_ONLINE;
2712 break;
2713 }
2714 pDev = pDev->next_lun;
2715 }
2716 }
2717 }
2718 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2719 pDev =(struct adpt_device*) pI2o_dev->owner;
2720 if(!pDev){
2721 continue;
2722 }
2723 // Drive offline drives that previously existed but could not be found
2724 // in the LCT table
2725 if (pDev->state & DPTI_DEV_UNSCANNED){
2726 pDev->state = DPTI_DEV_OFFLINE;
2727 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2728 if (pDev->pScsi_dev) {
2729 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2730 }
2731 }
2732 }
2733 return 0;
2734}
2735
2736static void adpt_fail_posted_scbs(adpt_hba* pHba)
2737{
2738 struct scsi_cmnd* cmd = NULL;
2739 struct scsi_device* d = NULL;
2740
2741 shost_for_each_device(d, pHba->host) {
2742 unsigned long flags;
2743 spin_lock_irqsave(&d->list_lock, flags);
2744 list_for_each_entry(cmd, &d->cmd_list, list) {
2745 if(cmd->serial_number == 0){
2746 continue;
2747 }
2748 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2749 cmd->scsi_done(cmd);
2750 }
2751 spin_unlock_irqrestore(&d->list_lock, flags);
2752 }
2753}
2754
2755
2756/*============================================================================
2757 * Routines from i2o subsystem
2758 *============================================================================
2759 */
2760
2761
2762
2763/*
2764 * Bring an I2O controller into HOLD state. See the spec.
2765 */
2766static int adpt_i2o_activate_hba(adpt_hba* pHba)
2767{
2768 int rcode;
2769
2770 if(pHba->initialized ) {
2771 if (adpt_i2o_status_get(pHba) < 0) {
2772 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2773 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2774 return rcode;
2775 }
2776 if (adpt_i2o_status_get(pHba) < 0) {
2777 printk(KERN_INFO "HBA not responding.\n");
2778 return -1;
2779 }
2780 }
2781
2782 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2783 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2784 return -1;
2785 }
2786
2787 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2788 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2789 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2790 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2791 adpt_i2o_reset_hba(pHba);
2792 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2793 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2794 return -1;
2795 }
2796 }
2797 } else {
2798 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2799 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2800 return rcode;
2801 }
2802
2803 }
2804
2805 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2806 return -1;
2807 }
2808
2809 /* In HOLD state */
2810
2811 if (adpt_i2o_hrt_get(pHba) < 0) {
2812 return -1;
2813 }
2814
2815 return 0;
2816}
2817
2818/*
2819 * Bring a controller online into OPERATIONAL state.
2820 */
2821
2822static int adpt_i2o_online_hba(adpt_hba* pHba)
2823{
2824 if (adpt_i2o_systab_send(pHba) < 0) {
2825 adpt_i2o_delete_hba(pHba);
2826 return -1;
2827 }
2828 /* In READY state */
2829
2830 if (adpt_i2o_enable_hba(pHba) < 0) {
2831 adpt_i2o_delete_hba(pHba);
2832 return -1;
2833 }
2834
2835 /* In OPERATIONAL state */
2836 return 0;
2837}
2838
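/*
 * Hand an unused inbound message frame back to the IOP by posting a
 * UtilNOP in it, so the frame can be recycled.
 */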
2839static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2840{
2841 u32 __iomem *msg;
2842 ulong timeout = jiffies + 5*HZ;
2843
2844 while(m == EMPTY_QUEUE){
2845 rmb();
2846 m = readl(pHba->post_port);
2847 if(m != EMPTY_QUEUE){
2848 break;
2849 }
2850 if(time_after(jiffies,timeout)){
2851 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2852 return 2;
2853 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002854 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 }
2856 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2857 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2858 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2859 writel( 0,&msg[2]);
2860 wmb();
2861
2862 writel(m, pHba->post_port);
2863 wmb();
2864 return 0;
2865}
2866
2867static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2868{
2869 u8 *status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002870 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 u32 __iomem *msg = NULL;
2872 int i;
2873 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 u32 m;
2875
2876 do {
2877 rmb();
2878 m = readl(pHba->post_port);
2879 if (m != EMPTY_QUEUE) {
2880 break;
2881 }
2882
2883 if(time_after(jiffies,timeout)){
2884 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2885 return -ETIMEDOUT;
2886 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002887 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 } while(m == EMPTY_QUEUE);
2889
2890 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2891
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002892 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002893 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 adpt_send_nop(pHba, m);
2895 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2896 pHba->name);
2897 return -ENOMEM;
2898 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002899 memset(status, 0, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
2901 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2902 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2903 writel(0, &msg[2]);
2904 writel(0x0106, &msg[3]); /* Transaction context */
2905 writel(4096, &msg[4]); /* Host page frame size */
2906 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2907 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002908 writel((u32)addr, &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909
2910 writel(m, pHba->post_port);
2911 wmb();
2912
2913 // Wait for the reply status to come back
2914 do {
2915 if (*status) {
2916 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2917 break;
2918 }
2919 }
2920 rmb();
2921 if(time_after(jiffies,timeout)){
2922 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002923 /* We lose 4 bytes of "status" here, but we
2924 cannot free these because controller may
2925 awake and corrupt those bytes at any time */
2926 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 return -ETIMEDOUT;
2928 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002929 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 } while (1);
2931
2932 // If the command was successful, fill the fifo with our reply
2933 // message packets
2934 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002935 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 return -2;
2937 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002938 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002940 if(pHba->reply_pool != NULL) {
2941 dma_free_coherent(&pHba->pDev->dev,
2942 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2943 pHba->reply_pool, pHba->reply_pool_pa);
2944 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002946 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2947 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2948 &pHba->reply_pool_pa, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002949 if (!pHba->reply_pool) {
2950 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2951 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002953 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
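	/*
	 * Each write below hands the IOP the bus address of one empty
	 * REPLY_FRAME_SIZE*4 byte slot, giving it a pool of free frames to
	 * post its replies into.
	 */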
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 for(i = 0; i < pHba->reply_fifo_size; i++) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002956 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2957 pHba->reply_port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 }
2960 adpt_i2o_status_get(pHba);
2961 return 0;
2962}
2963
2964
2965/*
2966 * I2O System Table. Contains information about
2967 * all the IOPs in the system. Used to inform IOPs
2968 * about each other's existence.
2969 *
2970 * sys_tbl_ver is the CurrentChangeIndicator that is
2971 * used by IOPs to track changes.
2972 */
2973
2974
2975
2976static s32 adpt_i2o_status_get(adpt_hba* pHba)
2977{
2978 ulong timeout;
2979 u32 m;
2980 u32 __iomem *msg;
2981 u8 *status_block=NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
2983 if(pHba->status_block == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002984 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2985 sizeof(i2o_status_block),
2986 &pHba->status_block_pa, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 if(pHba->status_block == NULL) {
2988 printk(KERN_ERR
2989 "dpti%d: Get Status Block failed; Out of memory. \n",
2990 pHba->unit);
2991 return -ENOMEM;
2992 }
2993 }
2994 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2995 status_block = (u8*)(pHba->status_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2997 do {
2998 rmb();
2999 m = readl(pHba->post_port);
3000 if (m != EMPTY_QUEUE) {
3001 break;
3002 }
3003 if(time_after(jiffies,timeout)){
3004 printk(KERN_ERR "%s: Timeout waiting for message !\n",
3005 pHba->name);
3006 return -ETIMEDOUT;
3007 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08003008 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 } while(m==EMPTY_QUEUE);
3010
3011
3012 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3013
3014 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3015 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3016 writel(1, &msg[2]);
3017 writel(0, &msg[3]);
3018 writel(0, &msg[4]);
3019 writel(0, &msg[5]);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003020 writel( dma_low(pHba->status_block_pa), &msg[6]);
3021 writel( dma_high(pHba->status_block_pa), &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3023
3024 //post message
3025 writel(m, pHba->post_port);
3026 wmb();
3027
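	/*
	 * The last byte of the 88-byte status block acts as a completion
	 * marker: poll until the IOP sets it to 0xff, which indicates the
	 * whole block has been written back.
	 */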
3028 while(status_block[87]!=0xff){
3029 if(time_after(jiffies,timeout)){
3030 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3031 pHba->unit);
3032 return -ETIMEDOUT;
3033 }
3034 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08003035 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 }
3037
3038 // Set up our number of outbound and inbound messages
3039 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3040 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3041 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3042 }
3043
3044 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3045 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3046 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3047 }
3048
3049 // Calculate the Scatter Gather list size
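	/* Work out how many sg_simple_elements fit in one inbound frame after
	 * the SCSI message header (12 words, or 14 with the 64-bit SGL
	 * attribute); DMA64 mode also costs one extra word per element for
	 * the high half of each address. */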
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003050 if (dpt_dma64(pHba)) {
3051 pHba->sg_tablesize
3052 = ((pHba->status_block->inbound_frame_size * 4
3053 - 14 * sizeof(u32))
3054 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3055 } else {
3056 pHba->sg_tablesize
3057 = ((pHba->status_block->inbound_frame_size * 4
3058 - 12 * sizeof(u32))
3059 / sizeof(struct sg_simple_element));
3060 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3062 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3063 }
3064
3065
3066#ifdef DEBUG
3067 printk("dpti%d: State = ",pHba->unit);
3068 switch(pHba->status_block->iop_state) {
3069 case 0x01:
3070 printk("INIT\n");
3071 break;
3072 case 0x02:
3073 printk("RESET\n");
3074 break;
3075 case 0x04:
3076 printk("HOLD\n");
3077 break;
3078 case 0x05:
3079 printk("READY\n");
3080 break;
3081 case 0x08:
3082 printk("OPERATIONAL\n");
3083 break;
3084 case 0x10:
3085 printk("FAILED\n");
3086 break;
3087 case 0x11:
3088 printk("FAULTED\n");
3089 break;
3090 default:
3091 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3092 }
3093#endif
3094 return 0;
3095}
3096
3097/*
3098 * Get the IOP's Logical Configuration Table
3099 */
3100static int adpt_i2o_lct_get(adpt_hba* pHba)
3101{
3102 u32 msg[8];
3103 int ret;
3104 u32 buf[16];
3105
3106 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3107 pHba->lct_size = pHba->status_block->expected_lct_size;
3108 }
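	/*
	 * Ask for the LCT using the size we expect; if the reply reports a
	 * larger table, grow the buffer and issue the request again.
	 */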
3109 do {
3110 if (pHba->lct == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003111 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3112 pHba->lct_size, &pHba->lct_pa,
3113 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 if(pHba->lct == NULL) {
3115 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3116 pHba->name);
3117 return -ENOMEM;
3118 }
3119 }
3120 memset(pHba->lct, 0, pHba->lct_size);
3121
3122 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3123 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3124 msg[2] = 0;
3125 msg[3] = 0;
3126 msg[4] = 0xFFFFFFFF; /* All devices */
3127 msg[5] = 0x00000000; /* Report now */
3128 msg[6] = 0xD0000000|pHba->lct_size;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003129 msg[7] = (u32)pHba->lct_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130
3131 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3132			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3133 pHba->name, ret);
3134 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3135 return ret;
3136 }
3137
3138 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3139 pHba->lct_size = pHba->lct->table_size << 2;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003140 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3141 pHba->lct, pHba->lct_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 pHba->lct = NULL;
3143 }
3144 } while (pHba->lct == NULL);
3145
3146	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3147
3148
3149 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3150 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3151 pHba->FwDebugBufferSize = buf[1];
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003152 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3153 pHba->FwDebugBufferSize);
3154 if (pHba->FwDebugBuffer_P) {
3155 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3156 FW_DEBUG_FLAGS_OFFSET;
3157 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3158 FW_DEBUG_BLED_OFFSET;
3159 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3160 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3161 FW_DEBUG_STR_LENGTH_OFFSET;
3162 pHba->FwDebugBuffer_P += buf[2];
3163 pHba->FwDebugFlags = 0;
3164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 }
3166
3167 return 0;
3168}
3169
3170static int adpt_i2o_build_sys_table(void)
3171{
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003172 adpt_hba* pHba = hba_chain;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 int count = 0;
3174
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003175 if (sys_tbl)
3176 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3177 sys_tbl, sys_tbl_pa);
3178
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3180 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3181
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003182 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3183 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02003184 if (!sys_tbl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3186 return -ENOMEM;
3187 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003188 memset(sys_tbl, 0, sys_tbl_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189
3190 sys_tbl->num_entries = hba_count;
3191 sys_tbl->version = I2OVERSION;
3192 sys_tbl->change_ind = sys_tbl_ind++;
3193
3194 for(pHba = hba_chain; pHba; pHba = pHba->next) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003195 u64 addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196 // Get updated Status Block so we have the latest information
3197 if (adpt_i2o_status_get(pHba)) {
3198 sys_tbl->num_entries--;
3199 continue; // try next one
3200 }
3201
3202 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3203 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3204 sys_tbl->iops[count].seg_num = 0;
3205 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3206 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3207 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3208 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3209 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3210 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 * Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	// ConfigDialog requested
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	// Multi-user capable
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	// Peer service enabled!
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

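/*
 * Fetch the Hardware Resource Table (HRT) from the IOP, growing the DMA
 * buffer and retrying when the firmware reports more entries than the
 * initial i2o_hrt allocation can hold.
 */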
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 * Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
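	/*
	 * UtilParamsGet operation block: a single I2O_PARAMS_FIELD_GET
	 * operation on `group`, requesting one field; opblk[4] (the field
	 * count) is patched to -1 below when the caller wants the whole
	 * group instead of a single field.
	 */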
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}


/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}

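/*
 * Ask the IOP to quiesce (stop accepting new external requests).
 * A no-op unless the IOP is currently in the READY or OPERATIONAL state.
 */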
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

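/*
 * Push the system table built by adpt_i2o_build_sys_table() to this IOP
 * (SysTabSet), together with empty private memory and private I/O space
 * declarations.
 */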
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

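/*
 * Busy-wait for roughly the given number of milliseconds; only compiled
 * in for the UARTDELAY debug path, so it simply spins in udelay() rather
 * than sleeping.
 */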
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

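/*
 * Module entry point: adpt_detect() probes for controllers and builds the
 * hba_chain list, then each discovered controller is registered with the
 * SCSI midlayer and scanned.
 */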
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

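/*
 * Module exit: detach every host from the SCSI midlayer first, then walk
 * the chain again and release the per-controller resources.
 */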
static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");