blob: 19e1b422260a40ec648d3c2f548dfe198d9c1e07 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37////////////////////////////////////////////////////////////////
38
39#include <linux/ioctl.h> /* For SCSI-Passthrough */
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h> /* for kmalloc() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <linux/pci.h> /* for PCI support */
45#include <linux/proc_fs.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h> /* for udelay */
48#include <linux/interrupt.h>
49#include <linux/kernel.h> /* for printk */
50#include <linux/sched.h>
51#include <linux/reboot.h>
52#include <linux/spinlock.h>
Matthias Gehre910638a2006-03-28 01:56:48 -080053#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55#include <linux/timer.h>
56#include <linux/string.h>
57#include <linux/ioport.h>
Arjan van de Ven0b950672006-01-11 13:16:10 +010058#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
60#include <asm/processor.h> /* for boot_cpu_data */
61#include <asm/pgtable.h>
62#include <asm/io.h> /* for virt_to_bus, etc. */
63
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68#include <scsi/scsi_tcq.h>
69
70#include "dpt/dptsig.h"
71#include "dpti.h"
72
73/*============================================================================
74 * Create a binary signature - this is read by dptsig
75 * Needed for our management apps
76 *============================================================================
77 */
Arnd Bergmannc45d15d2010-06-02 14:28:52 +020078static DEFINE_MUTEX(adpt_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079static dpt_sig_S DPTI_sig = {
80 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
81#ifdef __i386__
82 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
83#elif defined(__ia64__)
84 PROC_INTEL, PROC_IA64,
85#elif defined(__sparc__)
86 PROC_ULTRASPARC, PROC_ULTRASPARC,
87#elif defined(__alpha__)
88 PROC_ALPHA, PROC_ALPHA,
89#else
90 (-1),(-1),
91#endif
92 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
93 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
94 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
95};
96
97
98
99
100/*============================================================================
101 * Globals
102 *============================================================================
103 */
104
Arjan van de Ven0b950672006-01-11 13:16:10 +0100105static DEFINE_MUTEX(adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200107static struct i2o_sys_tbl *sys_tbl;
108static dma_addr_t sys_tbl_pa;
109static int sys_tbl_ind;
110static int sys_tbl_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112static adpt_hba* hba_chain = NULL;
113static int hba_count = 0;
114
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200115static struct class *adpt_sysfs_class;
116
Arnd Bergmannf4927c42010-04-27 00:24:01 +0200117static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200118#ifdef CONFIG_COMPAT
119static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
120#endif
121
Arjan van de Ven00977a52007-02-12 00:55:34 -0800122static const struct file_operations adpt_fops = {
Arnd Bergmannf4927c42010-04-27 00:24:01 +0200123 .unlocked_ioctl = adpt_unlocked_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124 .open = adpt_open,
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200125 .release = adpt_close,
126#ifdef CONFIG_COMPAT
127 .compat_ioctl = compat_adpt_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128#endif
Arnd Bergmann6038f372010-08-15 18:52:59 +0200129 .llseek = noop_llseek,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131
132/* Structures and definitions for synchronous message posting.
133 * See adpt_i2o_post_wait() for description
134 * */
135struct adpt_i2o_post_wait_data
136{
137 int status;
138 u32 id;
139 adpt_wait_queue_head_t *wq;
140 struct adpt_i2o_post_wait_data *next;
141};
142
143static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
144static u32 adpt_post_wait_id = 0;
145static DEFINE_SPINLOCK(adpt_post_wait_lock);
146
147
148/*============================================================================
149 * Functions
150 *============================================================================
151 */
152
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200153static inline int dpt_dma64(adpt_hba *pHba)
154{
155 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
156}
157
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200158static inline u32 dma_high(dma_addr_t addr)
159{
160 return upper_32_bits(addr);
161}
162
163static inline u32 dma_low(dma_addr_t addr)
164{
165 return (u32)addr;
166}
167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168static u8 adpt_read_blink_led(adpt_hba* host)
169{
Harvey Harrison172c1222008-04-28 16:50:03 -0700170 if (host->FwDebugBLEDflag_P) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 return readb(host->FwDebugBLEDvalue_P);
173 }
174 }
175 return 0;
176}
177
178/*============================================================================
179 * Scsi host template interface functions
180 *============================================================================
181 */
182
183static struct pci_device_id dptids[] = {
184 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
185 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
186 { 0, }
187};
188MODULE_DEVICE_TABLE(pci,dptids);
189
Andrew Morton24601bb2007-12-10 15:49:20 -0800190static int adpt_detect(struct scsi_host_template* sht)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
192 struct pci_dev *pDev = NULL;
Dan Carpenter229bab62010-03-15 11:26:56 +0300193 adpt_hba *pHba;
194 adpt_hba *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 PINFO("Detecting Adaptec I2O RAID controllers...\n");
197
198 /* search for all Adatpec I2O RAID cards */
Alan Coxa07f3532006-09-15 15:34:32 +0100199 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 if(pDev->device == PCI_DPT_DEVICE_ID ||
201 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
Andrew Morton24601bb2007-12-10 15:49:20 -0800202 if(adpt_install_hba(sht, pDev) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 PERROR("Could not Init an I2O RAID device\n");
204 PERROR("Will not try to detect others.\n");
205 return hba_count-1;
206 }
Alan Coxa07f3532006-09-15 15:34:32 +0100207 pci_dev_get(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 }
209 }
210
211 /* In INIT state, Activate IOPs */
Dan Carpenter229bab62010-03-15 11:26:56 +0300212 for (pHba = hba_chain; pHba; pHba = next) {
213 next = pHba->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 // Activate does get status , init outbound, and get hrt
215 if (adpt_i2o_activate_hba(pHba) < 0) {
216 adpt_i2o_delete_hba(pHba);
217 }
218 }
219
220
221 /* Active IOPs in HOLD state */
222
223rebuild_sys_tab:
224 if (hba_chain == NULL)
225 return 0;
226
227 /*
228 * If build_sys_table fails, we kill everything and bail
229 * as we can't init the IOPs w/o a system table
230 */
231 if (adpt_i2o_build_sys_table() < 0) {
232 adpt_i2o_sys_shutdown();
233 return 0;
234 }
235
236 PDEBUG("HBA's in HOLD state\n");
237
238 /* If IOP don't get online, we need to rebuild the System table */
239 for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 if (adpt_i2o_online_hba(pHba) < 0) {
241 adpt_i2o_delete_hba(pHba);
242 goto rebuild_sys_tab;
243 }
244 }
245
246 /* Active IOPs now in OPERATIONAL state */
247 PDEBUG("HBA's in OPERATIONAL state\n");
248
249 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
Dan Carpenter229bab62010-03-15 11:26:56 +0300250 for (pHba = hba_chain; pHba; pHba = next) {
251 next = pHba->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 if (adpt_i2o_lct_get(pHba) < 0){
254 adpt_i2o_delete_hba(pHba);
255 continue;
256 }
257
258 if (adpt_i2o_parse_lct(pHba) < 0){
259 adpt_i2o_delete_hba(pHba);
260 continue;
261 }
262 adpt_inquiry(pHba);
263 }
264
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200265 adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 if (IS_ERR(adpt_sysfs_class)) {
267 printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 adpt_sysfs_class = NULL;
269 }
270
Dan Carpenter229bab62010-03-15 11:26:56 +0300271 for (pHba = hba_chain; pHba; pHba = next) {
272 next = pHba->next;
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +0200273 if (adpt_scsi_host_alloc(pHba, sht) < 0){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 adpt_i2o_delete_hba(pHba);
275 continue;
276 }
277 pHba->initialized = TRUE;
278 pHba->state &= ~DPTI_STATE_RESET;
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200279 if (adpt_sysfs_class) {
Greg Kroah-Hartmand73a1a62008-07-21 20:03:34 -0700280 struct device *dev = device_create(adpt_sysfs_class,
Greg Kroah-Hartman9def0b92008-05-21 12:52:33 -0700281 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200282 "dpti%d", pHba->unit);
283 if (IS_ERR(dev)) {
284 printk(KERN_WARNING"dpti%d: unable to "
285 "create device in dpt_i2o class\n",
286 pHba->unit);
287 }
288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 }
290
291 // Register our control device node
292 // nodes will need to be created in /dev to access this
293 // the nodes can not be created from within the driver
294 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
Andrew Morton24601bb2007-12-10 15:49:20 -0800295 adpt_i2o_sys_shutdown();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 return 0;
297 }
298 return hba_count;
299}
300
301
Andrew Morton24601bb2007-12-10 15:49:20 -0800302/*
303 * scsi_unregister will be called AFTER we return.
304 */
305static int adpt_release(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306{
Andrew Morton24601bb2007-12-10 15:49:20 -0800307 adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308// adpt_i2o_quiesce_hba(pHba);
309 adpt_i2o_delete_hba(pHba);
Andrew Morton24601bb2007-12-10 15:49:20 -0800310 scsi_unregister(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 return 0;
312}
313
314
315static void adpt_inquiry(adpt_hba* pHba)
316{
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200317 u32 msg[17];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 u32 *mptr;
319 u32 *lenptr;
320 int direction;
321 int scsidir;
322 u32 len;
323 u32 reqlen;
324 u8* buf;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200325 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 u8 scb[16];
327 s32 rcode;
328
329 memset(msg, 0, sizeof(msg));
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200330 buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 if(!buf){
332 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
333 return;
334 }
335 memset((void*)buf, 0, 36);
336
337 len = 36;
338 direction = 0x00000000;
339 scsidir =0x40000000; // DATA IN (iop<--dev)
340
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200341 if (dpt_dma64(pHba))
342 reqlen = 17; // SINGLE SGE, 64 bit
343 else
344 reqlen = 14; // SINGLE SGE, 32 bit
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 /* Stick the headers on */
346 msg[0] = reqlen<<16 | SGL_OFFSET_12;
347 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
348 msg[2] = 0;
349 msg[3] = 0;
350 // Adaptec/DPT Private stuff
351 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
352 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
353 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
354 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
355 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
356 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
357 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
358
359 mptr=msg+7;
360
361 memset(scb, 0, sizeof(scb));
362 // Write SCSI command into the message - always 16 byte block
363 scb[0] = INQUIRY;
364 scb[1] = 0;
365 scb[2] = 0;
366 scb[3] = 0;
367 scb[4] = 36;
368 scb[5] = 0;
369 // Don't care about the rest of scb
370
371 memcpy(mptr, scb, sizeof(scb));
372 mptr+=4;
373 lenptr=mptr++; /* Remember me - fill in when we know */
374
375 /* Now fill in the SGList and command */
376 *lenptr = len;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200377 if (dpt_dma64(pHba)) {
378 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
379 *mptr++ = 1 << PAGE_SHIFT;
380 *mptr++ = 0xD0000000|direction|len;
381 *mptr++ = dma_low(addr);
382 *mptr++ = dma_high(addr);
383 } else {
384 *mptr++ = 0xD0000000|direction|len;
385 *mptr++ = addr;
386 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387
388 // Send it on it's way
389 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
390 if (rcode != 0) {
391 sprintf(pHba->detail, "Adaptec I2O RAID");
392 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
393 if (rcode != -ETIME && rcode != -EINTR)
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200394 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 } else {
396 memset(pHba->detail, 0, sizeof(pHba->detail));
397 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
398 memcpy(&(pHba->detail[16]), " Model: ", 8);
399 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
400 memcpy(&(pHba->detail[40]), " FW: ", 4);
401 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
402 pHba->detail[48] = '\0'; /* precautionary */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200403 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 }
405 adpt_i2o_status_get(pHba);
406 return ;
407}
408
409
410static int adpt_slave_configure(struct scsi_device * device)
411{
412 struct Scsi_Host *host = device->host;
413 adpt_hba* pHba;
414
415 pHba = (adpt_hba *) host->hostdata[0];
416
417 if (host->can_queue && device->tagged_supported) {
418 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
419 host->can_queue - 1);
420 } else {
421 scsi_adjust_queue_depth(device, 0, 1);
422 }
423 return 0;
424}
425
Jeff Garzikf2812332010-11-16 02:10:29 -0500426static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427{
428 adpt_hba* pHba = NULL;
429 struct adpt_device* pDev = NULL; /* dpt per device information */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430
431 cmd->scsi_done = done;
432 /*
433 * SCSI REQUEST_SENSE commands will be executed automatically by the
434 * Host Adapter for any errors, so they should not be executed
435 * explicitly unless the Sense Data is zero indicating that no error
436 * occurred.
437 */
438
439 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
440 cmd->result = (DID_OK << 16);
441 cmd->scsi_done(cmd);
442 return 0;
443 }
444
445 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
446 if (!pHba) {
447 return FAILED;
448 }
449
450 rmb();
451 /*
452 * TODO: I need to block here if I am processing ioctl cmds
453 * but if the outstanding cmds all finish before the ioctl,
454 * the scsi-core will not know to start sending cmds to me again.
455 * I need to a way to restart the scsi-cores queues or should I block
456 * calling scsi_done on the outstanding cmds instead
457 * for now we don't set the IOCTL state
458 */
459 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
460 pHba->host->last_reset = jiffies;
461 pHba->host->resetting = 1;
462 return 1;
463 }
464
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 // TODO if the cmd->device if offline then I may need to issue a bus rescan
466 // followed by a get_lct to see if the device is there anymore
467 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
468 /*
469 * First command request for this device. Set up a pointer
470 * to the device structure. This should be a TEST_UNIT_READY
471 * command from scan_scsis_single.
472 */
473 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
474 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
475 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
476 cmd->result = (DID_NO_CONNECT << 16);
477 cmd->scsi_done(cmd);
478 return 0;
479 }
480 cmd->device->hostdata = pDev;
481 }
482 pDev->pScsi_dev = cmd->device;
483
484 /*
485 * If we are being called from when the device is being reset,
486 * delay processing of the command until later.
487 */
488 if (pDev->state & DPTI_DEV_RESET ) {
489 return FAILED;
490 }
491 return adpt_scsi_to_i2o(pHba, cmd, pDev);
492}
493
Jeff Garzikf2812332010-11-16 02:10:29 -0500494static DEF_SCSI_QCMD(adpt_queue)
495
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
497 sector_t capacity, int geom[])
498{
499 int heads=-1;
500 int sectors=-1;
501 int cylinders=-1;
502
503 // *** First lets set the default geometry ****
504
505 // If the capacity is less than ox2000
506 if (capacity < 0x2000 ) { // floppy
507 heads = 18;
508 sectors = 2;
509 }
510 // else if between 0x2000 and 0x20000
511 else if (capacity < 0x20000) {
512 heads = 64;
513 sectors = 32;
514 }
515 // else if between 0x20000 and 0x40000
516 else if (capacity < 0x40000) {
517 heads = 65;
518 sectors = 63;
519 }
520 // else if between 0x4000 and 0x80000
521 else if (capacity < 0x80000) {
522 heads = 128;
523 sectors = 63;
524 }
525 // else if greater than 0x80000
526 else {
527 heads = 255;
528 sectors = 63;
529 }
530 cylinders = sector_div(capacity, heads * sectors);
531
532 // Special case if CDROM
533 if(sdev->type == 5) { // CDROM
534 heads = 252;
535 sectors = 63;
536 cylinders = 1111;
537 }
538
539 geom[0] = heads;
540 geom[1] = sectors;
541 geom[2] = cylinders;
542
543 PDEBUG("adpt_bios_param: exit\n");
544 return 0;
545}
546
547
548static const char *adpt_info(struct Scsi_Host *host)
549{
550 adpt_hba* pHba;
551
552 pHba = (adpt_hba *) host->hostdata[0];
553 return (char *) (pHba->detail);
554}
555
Al Viroff98f7c2013-03-31 03:21:50 -0400556static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557{
558 struct adpt_device* d;
559 int id;
560 int chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 adpt_hba* pHba;
562 int unit;
563
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 // Find HBA (host bus adapter) we are looking for
Arjan van de Ven0b950672006-01-11 13:16:10 +0100565 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566 for (pHba = hba_chain; pHba; pHba = pHba->next) {
567 if (pHba->host == host) {
568 break; /* found adapter */
569 }
570 }
Arjan van de Ven0b950672006-01-11 13:16:10 +0100571 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 if (pHba == NULL) {
573 return 0;
574 }
575 host = pHba->host;
576
Al Viroff98f7c2013-03-31 03:21:50 -0400577 seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
578 seq_printf(m, "%s\n", pHba->detail);
579 seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 pHba->host->host_no, pHba->name, host->irq);
Al Viroff98f7c2013-03-31 03:21:50 -0400581 seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
583
Al Viroff98f7c2013-03-31 03:21:50 -0400584 seq_printf(m, "Devices:\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 for(chan = 0; chan < MAX_CHANNEL; chan++) {
586 for(id = 0; id < MAX_ID; id++) {
587 d = pHba->channel[chan].device[id];
Al Viroff98f7c2013-03-31 03:21:50 -0400588 while(d) {
589 seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
590 seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591
592 unit = d->pI2o_dev->lct_data.tid;
Al Viroff98f7c2013-03-31 03:21:50 -0400593 seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
595 scsi_device_online(d->pScsi_dev)? "online":"offline");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 d = d->next_lun;
597 }
598 }
599 }
Al Viroff98f7c2013-03-31 03:21:50 -0400600 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601}
602
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200603/*
604 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
605 */
606static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
607{
608 return (u32)cmd->serial_number;
609}
610
611/*
612 * Go from a u32 'context' to a struct scsi_cmnd * .
613 * This could probably be made more efficient.
614 */
615static struct scsi_cmnd *
616 adpt_cmd_from_context(adpt_hba * pHba, u32 context)
617{
618 struct scsi_cmnd * cmd;
619 struct scsi_device * d;
620
621 if (context == 0)
622 return NULL;
623
624 spin_unlock(pHba->host->host_lock);
625 shost_for_each_device(d, pHba->host) {
626 unsigned long flags;
627 spin_lock_irqsave(&d->list_lock, flags);
628 list_for_each_entry(cmd, &d->cmd_list, list) {
629 if (((u32)cmd->serial_number == context)) {
630 spin_unlock_irqrestore(&d->list_lock, flags);
631 scsi_device_put(d);
632 spin_lock(pHba->host->host_lock);
633 return cmd;
634 }
635 }
636 spin_unlock_irqrestore(&d->list_lock, flags);
637 }
638 spin_lock(pHba->host->host_lock);
639
640 return NULL;
641}
642
643/*
644 * Turn a pointer to ioctl reply data into an u32 'context'
645 */
646static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
647{
648#if BITS_PER_LONG == 32
649 return (u32)(unsigned long)reply;
650#else
651 ulong flags = 0;
652 u32 nr, i;
653
654 spin_lock_irqsave(pHba->host->host_lock, flags);
655 nr = ARRAY_SIZE(pHba->ioctl_reply_context);
656 for (i = 0; i < nr; i++) {
657 if (pHba->ioctl_reply_context[i] == NULL) {
658 pHba->ioctl_reply_context[i] = reply;
659 break;
660 }
661 }
662 spin_unlock_irqrestore(pHba->host->host_lock, flags);
663 if (i >= nr) {
664 kfree (reply);
665 printk(KERN_WARNING"%s: Too many outstanding "
666 "ioctl commands\n", pHba->name);
667 return (u32)-1;
668 }
669
670 return i;
671#endif
672}
673
674/*
675 * Go from an u32 'context' to a pointer to ioctl reply data.
676 */
677static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
678{
679#if BITS_PER_LONG == 32
680 return (void *)(unsigned long)context;
681#else
682 void *p = pHba->ioctl_reply_context[context];
683 pHba->ioctl_reply_context[context] = NULL;
684
685 return p;
686#endif
687}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
689/*===========================================================================
690 * Error Handling routines
691 *===========================================================================
692 */
693
694static int adpt_abort(struct scsi_cmnd * cmd)
695{
696 adpt_hba* pHba = NULL; /* host bus adapter structure */
697 struct adpt_device* dptdevice; /* dpt per device information */
698 u32 msg[5];
699 int rcode;
700
701 if(cmd->serial_number == 0){
702 return FAILED;
703 }
704 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
Christoph Hellwig5cd049a2011-04-04 09:42:14 -0400705 printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
707 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
708 return FAILED;
709 }
710
711 memset(msg, 0, sizeof(msg));
712 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
713 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
714 msg[2] = 0;
715 msg[3]= 0;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200716 msg[4] = adpt_cmd_to_context(cmd);
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800717 if (pHba->host)
718 spin_lock_irq(pHba->host->host_lock);
719 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
720 if (pHba->host)
721 spin_unlock_irq(pHba->host->host_lock);
722 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 if(rcode == -EOPNOTSUPP ){
724 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
725 return FAILED;
726 }
Christoph Hellwig5cd049a2011-04-04 09:42:14 -0400727 printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 return FAILED;
729 }
Christoph Hellwig5cd049a2011-04-04 09:42:14 -0400730 printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731 return SUCCESS;
732}
733
734
735#define I2O_DEVICE_RESET 0x27
736// This is the same for BLK and SCSI devices
737// NOTE this is wrong in the i2o.h definitions
738// This is not currently supported by our adapter but we issue it anyway
739static int adpt_device_reset(struct scsi_cmnd* cmd)
740{
741 adpt_hba* pHba;
742 u32 msg[4];
743 u32 rcode;
744 int old_state;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -0700745 struct adpt_device* d = cmd->device->hostdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746
747 pHba = (void*) cmd->device->host->hostdata[0];
748 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
749 if (!d) {
750 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
751 return FAILED;
752 }
753 memset(msg, 0, sizeof(msg));
754 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
755 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
756 msg[2] = 0;
757 msg[3] = 0;
758
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800759 if (pHba->host)
760 spin_lock_irq(pHba->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 old_state = d->state;
762 d->state |= DPTI_DEV_RESET;
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800763 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
764 d->state = old_state;
765 if (pHba->host)
766 spin_unlock_irq(pHba->host->host_lock);
767 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 if(rcode == -EOPNOTSUPP ){
769 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
770 return FAILED;
771 }
772 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
773 return FAILED;
774 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
776 return SUCCESS;
777 }
778}
779
780
781#define I2O_HBA_BUS_RESET 0x87
782// This version of bus reset is called by the eh_error handler
783static int adpt_bus_reset(struct scsi_cmnd* cmd)
784{
785 adpt_hba* pHba;
786 u32 msg[4];
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800787 u32 rcode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788
789 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
790 memset(msg, 0, sizeof(msg));
791 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
792 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
793 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
794 msg[2] = 0;
795 msg[3] = 0;
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800796 if (pHba->host)
797 spin_lock_irq(pHba->host->host_lock);
798 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
799 if (pHba->host)
800 spin_unlock_irq(pHba->host->host_lock);
801 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
803 return FAILED;
804 } else {
805 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
806 return SUCCESS;
807 }
808}
809
810// This version of reset is called by the eh_error_handler
Jeff Garzik df0ae242005-05-28 07:57:14 -0400811static int __adpt_reset(struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812{
813 adpt_hba* pHba;
814 int rcode;
815 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
816 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
817 rcode = adpt_hba_reset(pHba);
818 if(rcode == 0){
819 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
820 return SUCCESS;
821 } else {
822 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
823 return FAILED;
824 }
825}
826
Jeff Garzik df0ae242005-05-28 07:57:14 -0400827static int adpt_reset(struct scsi_cmnd* cmd)
828{
829 int rc;
830
831 spin_lock_irq(cmd->device->host->host_lock);
832 rc = __adpt_reset(cmd);
833 spin_unlock_irq(cmd->device->host->host_lock);
834
835 return rc;
836}
837
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
839static int adpt_hba_reset(adpt_hba* pHba)
840{
841 int rcode;
842
843 pHba->state |= DPTI_STATE_RESET;
844
845 // Activate does get status , init outbound, and get hrt
846 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
847 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
848 adpt_i2o_delete_hba(pHba);
849 return rcode;
850 }
851
852 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
853 adpt_i2o_delete_hba(pHba);
854 return rcode;
855 }
856 PDEBUG("%s: in HOLD state\n",pHba->name);
857
858 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
859 adpt_i2o_delete_hba(pHba);
860 return rcode;
861 }
862 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
863
864 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
865 adpt_i2o_delete_hba(pHba);
866 return rcode;
867 }
868
869 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
870 adpt_i2o_delete_hba(pHba);
871 return rcode;
872 }
873 pHba->state &= ~DPTI_STATE_RESET;
874
875 adpt_fail_posted_scbs(pHba);
876 return 0; /* return success */
877}
878
879/*===========================================================================
880 *
881 *===========================================================================
882 */
883
884
885static void adpt_i2o_sys_shutdown(void)
886{
887 adpt_hba *pHba, *pNext;
Adrian Bunk458af542005-11-27 00:36:37 +0100888 struct adpt_i2o_post_wait_data *p1, *old;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
890 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
891 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
892 /* Delete all IOPs from the controller chain */
893 /* They should have already been released by the
894 * scsi-core
895 */
896 for (pHba = hba_chain; pHba; pHba = pNext) {
897 pNext = pHba->next;
898 adpt_i2o_delete_hba(pHba);
899 }
900
901 /* Remove any timedout entries from the wait queue. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902// spin_lock_irqsave(&adpt_post_wait_lock, flags);
903 /* Nothing should be outstanding at this point so just
904 * free them
905 */
Adrian Bunk458af542005-11-27 00:36:37 +0100906 for(p1 = adpt_post_wait_queue; p1;) {
907 old = p1;
908 p1 = p1->next;
909 kfree(old);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 }
911// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
912 adpt_post_wait_queue = NULL;
913
914 printk(KERN_INFO "Adaptec I2O controllers down.\n");
915}
916
Andrew Morton24601bb2007-12-10 15:49:20 -0800917static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918{
919
920 adpt_hba* pHba = NULL;
921 adpt_hba* p = NULL;
922 ulong base_addr0_phys = 0;
923 ulong base_addr1_phys = 0;
924 u32 hba_map0_area_size = 0;
925 u32 hba_map1_area_size = 0;
926 void __iomem *base_addr_virt = NULL;
927 void __iomem *msg_addr_virt = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200928 int dma64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929
930 int raptorFlag = FALSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931
932 if(pci_enable_device(pDev)) {
933 return -EINVAL;
934 }
Salyzyn, Mark9638d892006-01-12 08:31:57 -0500935
936 if (pci_request_regions(pDev, "dpt_i2o")) {
937 PERROR("dpti: adpt_config_hba: pci request region failed\n");
938 return -EINVAL;
939 }
940
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941 pci_set_master(pDev);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200942
943 /*
944 * See if we should enable dma64 mode.
945 */
946 if (sizeof(dma_addr_t) > 4 &&
Yang Hongyang6a355282009-04-06 19:01:13 -0700947 pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
Yang Hongyang284901a2009-04-06 19:01:15 -0700948 if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200949 dma64 = 1;
950 }
Yang Hongyang284901a2009-04-06 19:01:15 -0700951 if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 return -EINVAL;
953
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200954 /* adapter only supports message blocks below 4GB */
Yang Hongyang284901a2009-04-06 19:01:15 -0700955 pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200956
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 base_addr0_phys = pci_resource_start(pDev,0);
958 hba_map0_area_size = pci_resource_len(pDev,0);
959
960 // Check if standard PCI card or single BAR Raptor
961 if(pDev->device == PCI_DPT_DEVICE_ID){
962 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
963 // Raptor card with this device id needs 4M
964 hba_map0_area_size = 0x400000;
965 } else { // Not Raptor - it is a PCI card
966 if(hba_map0_area_size > 0x100000 ){
967 hba_map0_area_size = 0x100000;
968 }
969 }
970 } else {// Raptor split BAR config
971 // Use BAR1 in this configuration
972 base_addr1_phys = pci_resource_start(pDev,1);
973 hba_map1_area_size = pci_resource_len(pDev,1);
974 raptorFlag = TRUE;
975 }
976
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200977#if BITS_PER_LONG == 64
978 /*
979 * The original Adaptec 64 bit driver has this comment here:
980 * "x86_64 machines need more optimal mappings"
981 *
982 * I assume some HBAs report ridiculously large mappings
983 * and we need to limit them on platforms with IOMMUs.
984 */
985 if (raptorFlag == TRUE) {
986 if (hba_map0_area_size > 128)
987 hba_map0_area_size = 128;
988 if (hba_map1_area_size > 524288)
989 hba_map1_area_size = 524288;
990 } else {
991 if (hba_map0_area_size > 524288)
992 hba_map0_area_size = 524288;
993 }
994#endif
995
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
997 if (!base_addr_virt) {
James Bottomley9c472dd2005-08-08 11:51:38 -0500998 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 PERROR("dpti: adpt_config_hba: io remap failed\n");
1000 return -EINVAL;
1001 }
1002
1003 if(raptorFlag == TRUE) {
1004 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
1005 if (!msg_addr_virt) {
1006 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
1007 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001008 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009 return -EINVAL;
1010 }
1011 } else {
1012 msg_addr_virt = base_addr_virt;
1013 }
1014
1015 // Allocate and zero the data structure
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02001016 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1017 if (!pHba) {
1018 if (msg_addr_virt != base_addr_virt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 iounmap(msg_addr_virt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001021 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022 return -ENOMEM;
1023 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001024
Arjan van de Ven0b950672006-01-11 13:16:10 +01001025 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
1027 if(hba_chain != NULL){
1028 for(p = hba_chain; p->next; p = p->next);
1029 p->next = pHba;
1030 } else {
1031 hba_chain = pHba;
1032 }
1033 pHba->next = NULL;
1034 pHba->unit = hba_count;
Benoit Boissinot 23a2bc22005-04-25 19:46:30 -07001035 sprintf(pHba->name, "dpti%d", hba_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036 hba_count++;
1037
Arjan van de Ven0b950672006-01-11 13:16:10 +01001038 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039
1040 pHba->pDev = pDev;
1041 pHba->base_addr_phys = base_addr0_phys;
1042
1043 // Set up the Virtual Base Address of the I2O Device
1044 pHba->base_addr_virt = base_addr_virt;
1045 pHba->msg_addr_virt = msg_addr_virt;
1046 pHba->irq_mask = base_addr_virt+0x30;
1047 pHba->post_port = base_addr_virt+0x40;
1048 pHba->reply_port = base_addr_virt+0x44;
1049
1050 pHba->hrt = NULL;
1051 pHba->lct = NULL;
1052 pHba->lct_size = 0;
1053 pHba->status_block = NULL;
1054 pHba->post_count = 0;
1055 pHba->state = DPTI_STATE_RESET;
1056 pHba->pDev = pDev;
1057 pHba->devices = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001058 pHba->dma64 = dma64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059
1060 // Initializing the spinlocks
1061 spin_lock_init(&pHba->state_lock);
1062 spin_lock_init(&adpt_post_wait_lock);
1063
1064 if(raptorFlag == 0){
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001065 printk(KERN_INFO "Adaptec I2O RAID controller"
1066 " %d at %p size=%x irq=%d%s\n",
1067 hba_count-1, base_addr_virt,
1068 hba_map0_area_size, pDev->irq,
1069 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 } else {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001071 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1072 hba_count-1, pDev->irq,
1073 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1075 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1076 }
1077
Thomas Gleixner1d6f3592006-07-01 19:29:42 -07001078 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1080 adpt_i2o_delete_hba(pHba);
1081 return -EINVAL;
1082 }
1083
1084 return 0;
1085}
1086
1087
1088static void adpt_i2o_delete_hba(adpt_hba* pHba)
1089{
1090 adpt_hba* p1;
1091 adpt_hba* p2;
1092 struct i2o_device* d;
1093 struct i2o_device* next;
1094 int i;
1095 int j;
1096 struct adpt_device* pDev;
1097 struct adpt_device* pNext;
1098
1099
Arjan van de Ven0b950672006-01-11 13:16:10 +01001100 mutex_lock(&adpt_configuration_lock);
Andrew Morton24601bb2007-12-10 15:49:20 -08001101 // scsi_unregister calls our adpt_release which
1102 // does a quiese
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 if(pHba->host){
1104 free_irq(pHba->host->irq, pHba);
1105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 p2 = NULL;
1107 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1108 if(p1 == pHba) {
1109 if(p2) {
1110 p2->next = p1->next;
1111 } else {
1112 hba_chain = p1->next;
1113 }
1114 break;
1115 }
1116 }
1117
1118 hba_count--;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001119 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120
1121 iounmap(pHba->base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001122 pci_release_regions(pHba->pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1124 iounmap(pHba->msg_addr_virt);
1125 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001126 if(pHba->FwDebugBuffer_P)
1127 iounmap(pHba->FwDebugBuffer_P);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001128 if(pHba->hrt) {
1129 dma_free_coherent(&pHba->pDev->dev,
1130 pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1131 pHba->hrt, pHba->hrt_pa);
1132 }
1133 if(pHba->lct) {
1134 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1135 pHba->lct, pHba->lct_pa);
1136 }
1137 if(pHba->status_block) {
1138 dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1139 pHba->status_block, pHba->status_block_pa);
1140 }
1141 if(pHba->reply_pool) {
1142 dma_free_coherent(&pHba->pDev->dev,
1143 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1144 pHba->reply_pool, pHba->reply_pool_pa);
1145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 for(d = pHba->devices; d ; d = next){
1148 next = d->next;
1149 kfree(d);
1150 }
1151 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1152 for(j = 0; j < MAX_ID; j++){
1153 if(pHba->channel[i].device[j] != NULL){
1154 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1155 pNext = pDev->next_lun;
1156 kfree(pDev);
1157 }
1158 }
1159 }
1160 }
Alan Coxa07f3532006-09-15 15:34:32 +01001161 pci_dev_put(pHba->pDev);
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +02001162 if (adpt_sysfs_class)
1163 device_destroy(adpt_sysfs_class,
1164 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
Dan Carpenter229bab62010-03-15 11:26:56 +03001165 kfree(pHba);
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +02001166
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 if(hba_count <= 0){
1168 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +02001169 if (adpt_sysfs_class) {
1170 class_destroy(adpt_sysfs_class);
1171 adpt_sysfs_class = NULL;
1172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 }
1174}
1175
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1177{
1178 struct adpt_device* d;
1179
1180 if(chan < 0 || chan >= MAX_CHANNEL)
1181 return NULL;
1182
1183 if( pHba->channel[chan].device == NULL){
1184 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1185 return NULL;
1186 }
1187
1188 d = pHba->channel[chan].device[id];
1189 if(!d || d->tid == 0) {
1190 return NULL;
1191 }
1192
1193 /* If it is the only lun at that address then this should match*/
1194 if(d->scsi_lun == lun){
1195 return d;
1196 }
1197
1198 /* else we need to look through all the luns */
1199 for(d=d->next_lun ; d ; d = d->next_lun){
1200 if(d->scsi_lun == lun){
1201 return d;
1202 }
1203 }
1204 return NULL;
1205}
1206
1207
1208static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1209{
1210 // I used my own version of the WAIT_QUEUE_HEAD
1211 // to handle some version differences
1212 // When embedded in the kernel this could go back to the vanilla one
1213 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1214 int status = 0;
1215 ulong flags = 0;
1216 struct adpt_i2o_post_wait_data *p1, *p2;
1217 struct adpt_i2o_post_wait_data *wait_data =
Julia Lawallda2907f2010-05-30 15:49:22 +02001218 kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
Andrew Morton4452ea52005-06-23 00:10:26 -07001219 DECLARE_WAITQUEUE(wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220
Andrew Morton4452ea52005-06-23 00:10:26 -07001221 if (!wait_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 return -ENOMEM;
Andrew Morton4452ea52005-06-23 00:10:26 -07001223
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 /*
1225 * The spin locking is needed to keep anyone from playing
1226 * with the queue pointers and id while we do the same
1227 */
1228 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1229 // TODO we need a MORE unique way of getting ids
1230 // to support async LCT get
1231 wait_data->next = adpt_post_wait_queue;
1232 adpt_post_wait_queue = wait_data;
1233 adpt_post_wait_id++;
1234 adpt_post_wait_id &= 0x7fff;
1235 wait_data->id = adpt_post_wait_id;
1236 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1237
1238 wait_data->wq = &adpt_wq_i2o_post;
1239 wait_data->status = -ETIMEDOUT;
1240
Andrew Morton4452ea52005-06-23 00:10:26 -07001241 add_wait_queue(&adpt_wq_i2o_post, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
1243 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1244 timeout *= HZ;
1245 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1246 set_current_state(TASK_INTERRUPTIBLE);
1247 if(pHba->host)
1248 spin_unlock_irq(pHba->host->host_lock);
1249 if (!timeout)
1250 schedule();
1251 else{
1252 timeout = schedule_timeout(timeout);
1253 if (timeout == 0) {
1254 // I/O issued, but cannot get result in
1255 // specified time. Freeing resorces is
1256 // dangerous.
1257 status = -ETIME;
1258 }
1259 }
1260 if(pHba->host)
1261 spin_lock_irq(pHba->host->host_lock);
1262 }
Andrew Morton4452ea52005-06-23 00:10:26 -07001263 remove_wait_queue(&adpt_wq_i2o_post, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
1265 if(status == -ETIMEDOUT){
1266 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1267 // We will have to free the wait_data memory during shutdown
1268 return status;
1269 }
1270
1271 /* Remove the entry from the queue. */
1272 p2 = NULL;
1273 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1274 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1275 if(p1 == wait_data) {
1276 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1277 status = -EOPNOTSUPP;
1278 }
1279 if(p2) {
1280 p2->next = p1->next;
1281 } else {
1282 adpt_post_wait_queue = p1->next;
1283 }
1284 break;
1285 }
1286 }
1287 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1288
1289 kfree(wait_data);
1290
1291 return status;
1292}
1293
1294
1295static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1296{
1297
1298 u32 m = EMPTY_QUEUE;
1299 u32 __iomem *msg;
1300 ulong timeout = jiffies + 30*HZ;
1301 do {
1302 rmb();
1303 m = readl(pHba->post_port);
1304 if (m != EMPTY_QUEUE) {
1305 break;
1306 }
1307 if(time_after(jiffies,timeout)){
1308 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1309 return -ETIMEDOUT;
1310 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001311 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 } while(m == EMPTY_QUEUE);
1313
1314 msg = pHba->msg_addr_virt + m;
1315 memcpy_toio(msg, data, len);
1316 wmb();
1317
1318 //post message
1319 writel(m, pHba->post_port);
1320 wmb();
1321
1322 return 0;
1323}
1324
1325
1326static void adpt_i2o_post_wait_complete(u32 context, int status)
1327{
1328 struct adpt_i2o_post_wait_data *p1 = NULL;
1329 /*
1330 * We need to search through the adpt_post_wait
1331 * queue to see if the given message is still
1332 * outstanding. If not, it means that the IOP
1333 * took longer to respond to the message than we
1334 * had allowed and timer has already expired.
1335 * Not much we can do about that except log
1336 * it for debug purposes, increase timeout, and recompile
1337 *
1338 * Lock needed to keep anyone from moving queue pointers
1339 * around while we're looking through them.
1340 */
1341
1342 context &= 0x7fff;
1343
1344 spin_lock(&adpt_post_wait_lock);
1345 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1346 if(p1->id == context) {
1347 p1->status = status;
1348 spin_unlock(&adpt_post_wait_lock);
1349 wake_up_interruptible(p1->wq);
1350 return;
1351 }
1352 }
1353 spin_unlock(&adpt_post_wait_lock);
1354 // If this happens we lose commands that probably really completed
1355 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1356 printk(KERN_DEBUG" Tasks in wait queue:\n");
1357 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1358 printk(KERN_DEBUG" %d\n",p1->id);
1359 }
1360 return;
1361}
1362
1363static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1364{
1365 u32 msg[8];
1366 u8* status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001367 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 u32 m = EMPTY_QUEUE ;
1369 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1370
1371 if(pHba->initialized == FALSE) { // First time reset should be quick
1372 timeout = jiffies + (25*HZ);
1373 } else {
1374 adpt_i2o_quiesce_hba(pHba);
1375 }
1376
1377 do {
1378 rmb();
1379 m = readl(pHba->post_port);
1380 if (m != EMPTY_QUEUE) {
1381 break;
1382 }
1383 if(time_after(jiffies,timeout)){
1384 printk(KERN_WARNING"Timeout waiting for message!\n");
1385 return -ETIMEDOUT;
1386 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001387 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 } while (m == EMPTY_QUEUE);
1389
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001390 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 if(status == NULL) {
1392 adpt_send_nop(pHba, m);
1393 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1394 return -ENOMEM;
1395 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001396 memset(status,0,4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
1398 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1399 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1400 msg[2]=0;
1401 msg[3]=0;
1402 msg[4]=0;
1403 msg[5]=0;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001404 msg[6]=dma_low(addr);
1405 msg[7]=dma_high(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406
1407 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1408 wmb();
1409 writel(m, pHba->post_port);
1410 wmb();
1411
1412 while(*status == 0){
1413 if(time_after(jiffies,timeout)){
1414 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001415 /* We lose 4 bytes of "status" here, but we cannot
1416 free these because controller may awake and corrupt
1417 those bytes at any time */
1418 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 return -ETIMEDOUT;
1420 }
1421 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001422 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 }
1424
1425 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1426 PDEBUG("%s: Reset in progress...\n", pHba->name);
1427 // Here we wait for message frame to become available
1428 // indicated that reset has finished
1429 do {
1430 rmb();
1431 m = readl(pHba->post_port);
1432 if (m != EMPTY_QUEUE) {
1433 break;
1434 }
1435 if(time_after(jiffies,timeout)){
1436 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001437 /* We lose 4 bytes of "status" here, but we
1438 cannot free these because controller may
1439 awake and corrupt those bytes at any time */
1440 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 return -ETIMEDOUT;
1442 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001443 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 } while (m == EMPTY_QUEUE);
1445 // Flush the offset
1446 adpt_send_nop(pHba, m);
1447 }
1448 adpt_i2o_status_get(pHba);
1449 if(*status == 0x02 ||
1450 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1451 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1452 pHba->name);
1453 } else {
1454 PDEBUG("%s: Reset completed.\n", pHba->name);
1455 }
1456
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001457 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458#ifdef UARTDELAY
1459 // This delay is to allow someone attached to the card through the debug UART to
1460 // set up the dump levels that they want before the rest of the initialization sequence
1461 adpt_delay(20000);
1462#endif
1463 return 0;
1464}
1465
1466
1467static int adpt_i2o_parse_lct(adpt_hba* pHba)
1468{
1469 int i;
1470 int max;
1471 int tid;
1472 struct i2o_device *d;
1473 i2o_lct *lct = pHba->lct;
1474 u8 bus_no = 0;
1475 s16 scsi_id;
1476 s16 scsi_lun;
1477 u32 buf[10]; // larger than 7, or 8 ...
1478 struct adpt_device* pDev;
1479
1480 if (lct == NULL) {
1481 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1482 return -1;
1483 }
1484
1485 max = lct->table_size;
1486 max -= 3;
1487 max /= 9;
1488
1489 for(i=0;i<max;i++) {
1490 if( lct->lct_entry[i].user_tid != 0xfff){
1491 /*
1492 * If we have hidden devices, we need to inform the upper layers about
1493 * the possible maximum id reference to handle device access when
1494 * an array is disassembled. This code has no other purpose but to
1495 * allow us future access to devices that are currently hidden
1496 * behind arrays, hotspares or have not been configured (JBOD mode).
1497 */
1498 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1499 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1500 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1501 continue;
1502 }
1503 tid = lct->lct_entry[i].tid;
1504 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1505 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1506 continue;
1507 }
1508 bus_no = buf[0]>>16;
1509 scsi_id = buf[1];
1510 scsi_lun = (buf[2]>>8 )&0xff;
1511 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1512 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1513 continue;
1514 }
1515 if (scsi_id >= MAX_ID){
1516 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1517 continue;
1518 }
1519 if(bus_no > pHba->top_scsi_channel){
1520 pHba->top_scsi_channel = bus_no;
1521 }
1522 if(scsi_id > pHba->top_scsi_id){
1523 pHba->top_scsi_id = scsi_id;
1524 }
1525 if(scsi_lun > pHba->top_scsi_lun){
1526 pHba->top_scsi_lun = scsi_lun;
1527 }
1528 continue;
1529 }
Robert P. J. Day5cbded52006-12-13 00:35:56 -08001530 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 if(d==NULL)
1532 {
1533 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1534 return -ENOMEM;
1535 }
1536
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001537 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 d->next = NULL;
1539
1540 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1541
1542 d->flags = 0;
1543 tid = d->lct_data.tid;
1544 adpt_i2o_report_hba_unit(pHba, d);
1545 adpt_i2o_install_device(pHba, d);
1546 }
1547 bus_no = 0;
1548 for(d = pHba->devices; d ; d = d->next) {
1549 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1550 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1551 tid = d->lct_data.tid;
1552 // TODO get the bus_no from hrt-but for now they are in order
1553 //bus_no =
1554 if(bus_no > pHba->top_scsi_channel){
1555 pHba->top_scsi_channel = bus_no;
1556 }
1557 pHba->channel[bus_no].type = d->lct_data.class_id;
1558 pHba->channel[bus_no].tid = tid;
1559 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1560 {
1561 pHba->channel[bus_no].scsi_id = buf[1];
1562 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1563 }
1564 // TODO remove - this is just until we get from hrt
1565 bus_no++;
1566 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1567 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1568 break;
1569 }
1570 }
1571 }
1572
1573 // Setup adpt_device table
1574 for(d = pHba->devices; d ; d = d->next) {
1575 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1576 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1577 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1578
1579 tid = d->lct_data.tid;
1580 scsi_id = -1;
1581 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1582 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1583 bus_no = buf[0]>>16;
1584 scsi_id = buf[1];
1585 scsi_lun = (buf[2]>>8 )&0xff;
1586 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1587 continue;
1588 }
1589 if (scsi_id >= MAX_ID) {
1590 continue;
1591 }
1592 if( pHba->channel[bus_no].device[scsi_id] == NULL){
1593				pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1594				if(pDev == NULL) {
1595					return -ENOMEM;
1596				}
1597				pHba->channel[bus_no].device[scsi_id] = pDev;
1598			} else {
1599				for( pDev = pHba->channel[bus_no].device[scsi_id];	
1600						pDev->next_lun; pDev = pDev->next_lun){
1601				}
1602				pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1603				if(pDev->next_lun == NULL) {
1604					return -ENOMEM;
1605				}
1606				pDev = pDev->next_lun;
1607 }
1608 pDev->tid = tid;
1609 pDev->scsi_channel = bus_no;
1610 pDev->scsi_id = scsi_id;
1611 pDev->scsi_lun = scsi_lun;
1612 pDev->pI2o_dev = d;
1613 d->owner = pDev;
1614 pDev->type = (buf[0])&0xff;
1615 pDev->flags = (buf[0]>>8)&0xff;
1616 if(scsi_id > pHba->top_scsi_id){
1617 pHba->top_scsi_id = scsi_id;
1618 }
1619 if(scsi_lun > pHba->top_scsi_lun){
1620 pHba->top_scsi_lun = scsi_lun;
1621 }
1622 }
1623 if(scsi_id == -1){
1624 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1625 d->lct_data.identity_tag);
1626 }
1627 }
1628 }
1629 return 0;
1630}
1631
1632
1633/*
1634 * Each I2O controller has a chain of devices on it - these match
1635 * the useful parts of the LCT of the board.
1636 */
1637
1638static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1639{
1640	mutex_lock(&adpt_configuration_lock);
1641	d->controller=pHba;
1642 d->owner=NULL;
1643 d->next=pHba->devices;
1644 d->prev=NULL;
1645 if (pHba->devices != NULL){
1646 pHba->devices->prev=d;
1647 }
1648 pHba->devices=d;
1649 *d->dev_name = 0;
1650
1651	mutex_unlock(&adpt_configuration_lock);
1652	return 0;
1653}
1654
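/*
 * Character device open: the minor number selects the HBA.  We only
 * check that the adapter exists and flag it in_use; the exclusive-open
 * check is commented out below.
 */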
1655static int adpt_open(struct inode *inode, struct file *file)
1656{
1657 int minor;
1658 adpt_hba* pHba;
1659
1660	mutex_lock(&adpt_mutex);
1661	//TODO check for root access
1662 //
1663 minor = iminor(inode);
1664 if (minor >= hba_count) {
1665		mutex_unlock(&adpt_mutex);
1666		return -ENXIO;
1667 }
1668	mutex_lock(&adpt_configuration_lock);
1669	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1670 if (pHba->unit == minor) {
1671 break; /* found adapter */
1672 }
1673 }
1674 if (pHba == NULL) {
1675		mutex_unlock(&adpt_configuration_lock);
1676		mutex_unlock(&adpt_mutex);
1677		return -ENXIO;
1678 }
1679
1680// if(pHba->in_use){
1681	//	mutex_unlock(&adpt_configuration_lock);
1682//		return -EBUSY;
1683// }
1684
1685 pHba->in_use = 1;
1686	mutex_unlock(&adpt_configuration_lock);
1687	mutex_unlock(&adpt_mutex);
1688
1689 return 0;
1690}
1691
1692static int adpt_close(struct inode *inode, struct file *file)
1693{
1694 int minor;
1695 adpt_hba* pHba;
1696
1697 minor = iminor(inode);
1698 if (minor >= hba_count) {
1699 return -ENXIO;
1700 }
1701	mutex_lock(&adpt_configuration_lock);
1702	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1703 if (pHba->unit == minor) {
1704 break; /* found adapter */
1705 }
1706 }
1707	mutex_unlock(&adpt_configuration_lock);
1708	if (pHba == NULL) {
1709 return -ENXIO;
1710 }
1711
1712 pHba->in_use = 0;
1713
1714 return 0;
1715}
1716
1717
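/*
 * I2OUSRCMD passthrough: copy in a user-built I2O message, double-buffer
 * each simple SG element through a dma_alloc_coherent() bounce buffer,
 * post the frame with adpt_i2o_post_wait(), then copy the SG data and
 * the reply frame back out to user space.
 */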
1718static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1719{
1720 u32 msg[MAX_MESSAGE_SIZE];
1721 u32* reply = NULL;
1722 u32 size = 0;
1723 u32 reply_size = 0;
1724 u32 __user *user_msg = arg;
1725 u32 __user * user_reply = NULL;
1726 void *sg_list[pHba->sg_tablesize];
1727 u32 sg_offset = 0;
1728 u32 sg_count = 0;
1729 int sg_index = 0;
1730 u32 i = 0;
1731 u32 rcode = 0;
1732 void *p = NULL;
1733	dma_addr_t addr;
1734	ulong flags = 0;
1735
1736 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1737 // get user msg size in u32s
1738 if(get_user(size, &user_msg[0])){
1739 return -EFAULT;
1740 }
1741 size = size>>16;
1742
1743 user_reply = &user_msg[size];
1744 if(size > MAX_MESSAGE_SIZE){
1745 return -EFAULT;
1746 }
1747 size *= 4; // Convert to bytes
1748
1749 /* Copy in the user's I2O command */
1750 if(copy_from_user(msg, user_msg, size)) {
1751 return -EFAULT;
1752 }
1753 get_user(reply_size, &user_reply[0]);
1754 reply_size = reply_size>>16;
1755 if(reply_size > REPLY_FRAME_SIZE){
1756 reply_size = REPLY_FRAME_SIZE;
1757 }
1758 reply_size *= 4;
1759	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1760	if(reply == NULL) {
1761 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1762 return -ENOMEM;
1763 }
1764	sg_offset = (msg[0]>>4)&0xf;
1765 msg[2] = 0x40000000; // IOCTL context
1766	msg[3] = adpt_ioctl_to_context(pHba, reply);
1767	if (msg[3] == (u32)-1) {
		kfree(reply);	/* no free context slot - don't leak the reply buffer */
1768		return -EBUSY;
	}
1769
1770	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1771	if(sg_offset) {
1772		// TODO add 64 bit API
1773		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1774 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1775 if (sg_count > pHba->sg_tablesize){
1776 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1777 kfree (reply);
1778 return -EINVAL;
1779 }
1780
1781 for(i = 0; i < sg_count; i++) {
1782 int sg_size;
1783
1784 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1785 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1786 rcode = -EINVAL;
1787 goto cleanup;
1788 }
1789 sg_size = sg[i].flag_count & 0xffffff;
1790 /* Allocate memory for the transfer */
1791			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1792			if(!p) {
1793 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1794 pHba->name,sg_size,i,sg_count);
1795 rcode = -ENOMEM;
1796 goto cleanup;
1797 }
1798 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1799 /* Copy in the user's SG buffer if necessary */
1800 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1801				// sg_simple_element API is 32 bit
1802				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1803					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1804 rcode = -EFAULT;
1805 goto cleanup;
1806 }
1807 }
1808			/* sg_simple_element API is 32 bit, but addr < 4GB */
1809			sg[i].addr_bus = addr;
1810		}
1811 }
1812
1813 do {
1814 if(pHba->host)
1815 spin_lock_irqsave(pHba->host->host_lock, flags);
1816		// This state stops any new commands from entering the
1817 // controller while processing the ioctl
1818// pHba->state |= DPTI_STATE_IOCTL;
1819// We can't set this now - The scsi subsystem sets host_blocked and
1820// the queue empties and stops. We need a way to restart the queue
1821 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1822 if (rcode != 0)
1823 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1824 rcode, reply);
1825// pHba->state &= ~DPTI_STATE_IOCTL;
1826 if(pHba->host)
1827 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1828 } while(rcode == -ETIMEDOUT);
1829
1830 if(rcode){
1831 goto cleanup;
1832 }
1833
1834 if(sg_offset) {
1835 /* Copy back the Scatter Gather buffers back to user space */
1836 u32 j;
1837		// TODO add 64 bit API
1838		struct sg_simple_element* sg;
1839 int sg_size;
1840
1841		// re-read the original message so the SG copy-back uses the user's layout
1842 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1843 // get user msg size in u32s
1844 if(get_user(size, &user_msg[0])){
1845 rcode = -EFAULT;
1846 goto cleanup;
1847 }
1848 size = size>>16;
1849 size *= 4;
1850		if (size > MAX_MESSAGE_SIZE) {
1851			rcode = -EINVAL;
1852			goto cleanup;
1853		}
1854		/* Copy in the user's I2O command */
1855 if (copy_from_user (msg, user_msg, size)) {
1856 rcode = -EFAULT;
1857 goto cleanup;
1858 }
1859 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1860
1861		// TODO add 64 bit API
1862		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1863 for (j = 0; j < sg_count; j++) {
1864 /* Copy out the SG list to user's buffer if necessary */
1865 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1866 sg_size = sg[j].flag_count & 0xffffff;
1867				// sg_simple_element API is 32 bit
1868				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1869					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1870 rcode = -EFAULT;
1871 goto cleanup;
1872 }
1873 }
1874 }
1875 }
1876
1877 /* Copy back the reply to user space */
1878 if (reply_size) {
1879 // we wrote our own values for context - now restore the user supplied ones
1880 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1881 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1882 rcode = -EFAULT;
1883 }
1884 if(copy_to_user(user_reply, reply, reply_size)) {
1885 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1886 rcode = -EFAULT;
1887 }
1888 }
1889
1890
1891cleanup:
1892	if (rcode != -ETIME && rcode != -EINTR) {
1893		struct sg_simple_element *sg =
1894				(struct sg_simple_element*) (msg +sg_offset);
1895		kfree (reply);
1896		while(sg_index) {
1897 if(sg_list[--sg_index]) {
1898 dma_free_coherent(&pHba->pDev->dev,
1899 sg[sg_index].flag_count & 0xffffff,
1900 sg_list[sg_index],
1901 sg[sg_index].addr_bus);
1902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 }
1904 }
1905 return rcode;
1906}
1907
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908#if defined __ia64__
1909static void adpt_ia64_info(sysInfo_S* si)
1910{
1911 // This is all the info we need for now
1912 // We will add more info as our new
1913	// management utility requires it
1914 si->processorType = PROC_IA64;
1915}
1916#endif
1917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918#if defined __sparc__
1919static void adpt_sparc_info(sysInfo_S* si)
1920{
1921 // This is all the info we need for now
1922 // We will add more info as our new
1923	// management utility requires it
1924 si->processorType = PROC_ULTRASPARC;
1925}
1926#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927#if defined __alpha__
1928static void adpt_alpha_info(sysInfo_S* si)
1929{
1930 // This is all the info we need for now
1931 // We will add more info as our new
1932	// management utility requires it
1933 si->processorType = PROC_ALPHA;
1934}
1935#endif
1936
1937#if defined __i386__
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938static void adpt_i386_info(sysInfo_S* si)
1939{
1940 // This is all the info we need for now
1941 // We will add more info as our new
1942	// management utility requires it
1943 switch (boot_cpu_data.x86) {
1944 case CPU_386:
1945 si->processorType = PROC_386;
1946 break;
1947 case CPU_486:
1948 si->processorType = PROC_486;
1949 break;
1950 case CPU_586:
1951 si->processorType = PROC_PENTIUM;
1952 break;
1953 default: // Just in case
1954 si->processorType = PROC_PENTIUM;
1955 break;
1956 }
1957}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958#endif
1959
1960/*
1961 * This routine returns information about the system.  It does not affect
1962 * any driver logic, so if the info is wrong it does no harm.
1963 */
1964
1965/* Get all the info we can not get from kernel services */
1966static int adpt_system_info(void __user *buffer)
1967{
1968 sysInfo_S si;
1969
1970 memset(&si, 0, sizeof(si));
1971
1972 si.osType = OS_LINUX;
1973 si.osMajorVersion = 0;
1974 si.osMinorVersion = 0;
1975 si.osRevision = 0;
1976 si.busType = SI_PCI_BUS;
1977 si.processorFamily = DPTI_sig.dsProcessorFamily;
1978
1979#if defined __i386__
1980 adpt_i386_info(&si);
1981#elif defined (__ia64__)
1982 adpt_ia64_info(&si);
1983#elif defined(__sparc__)
1984 adpt_sparc_info(&si);
1985#elif defined (__alpha__)
1986 adpt_alpha_info(&si);
1987#else
1988 si.processorType = 0xff ;
1989#endif
1990 if (copy_to_user(buffer, &si, sizeof(si))){
1991 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1992 return -EFAULT;
1993 }
1994
1995 return 0;
1996}
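/*
 * Main ioctl dispatcher for the dpti character device: map the minor
 * number to an HBA, wait out any reset in progress, then handle the
 * DPT/I2O management commands (signature, passthru, controller info,
 * system info, blink LED, reset and rescan).
 */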
1997
1998static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1999{
2000 int minor;
2001 int error = 0;
2002 adpt_hba* pHba;
2003 ulong flags = 0;
2004 void __user *argp = (void __user *)arg;
2005
2006 minor = iminor(inode);
2007 if (minor >= DPTI_MAX_HBA){
2008 return -ENXIO;
2009 }
2010	mutex_lock(&adpt_configuration_lock);
2011	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2012 if (pHba->unit == minor) {
2013 break; /* found adapter */
2014 }
2015 }
2016	mutex_unlock(&adpt_configuration_lock);
2017	if(pHba == NULL){
2018 return -ENXIO;
2019 }
2020
2021	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2022		schedule_timeout_uninterruptible(2);
2023
2024 switch (cmd) {
2025 // TODO: handle 3 cases
2026 case DPT_SIGNATURE:
2027 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2028 return -EFAULT;
2029 }
2030 break;
2031 case I2OUSRCMD:
2032 return adpt_i2o_passthru(pHba, argp);
2033
2034 case DPT_CTRLINFO:{
2035 drvrHBAinfo_S HbaInfo;
2036
2037#define FLG_OSD_PCI_VALID 0x0001
2038#define FLG_OSD_DMA 0x0002
2039#define FLG_OSD_I2O 0x0004
2040 memset(&HbaInfo, 0, sizeof(HbaInfo));
2041 HbaInfo.drvrHBAnum = pHba->unit;
2042 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2043 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2044 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2045 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2046 HbaInfo.Interrupt = pHba->pDev->irq;
2047 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2048 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2049 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2050 return -EFAULT;
2051 }
2052 break;
2053 }
2054 case DPT_SYSINFO:
2055 return adpt_system_info(argp);
2056 case DPT_BLINKLED:{
2057 u32 value;
2058 value = (u32)adpt_read_blink_led(pHba);
2059 if (copy_to_user(argp, &value, sizeof(value))) {
2060 return -EFAULT;
2061 }
2062 break;
2063 }
2064 case I2ORESETCMD:
2065 if(pHba->host)
2066 spin_lock_irqsave(pHba->host->host_lock, flags);
2067 adpt_hba_reset(pHba);
2068 if(pHba->host)
2069 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2070 break;
2071 case I2ORESCANCMD:
2072 adpt_rescan(pHba);
2073 break;
2074 default:
2075 return -EINVAL;
2076 }
2077
2078 return error;
2079}
2080
2081static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2082{
2083 struct inode *inode;
2084 long ret;
2085
2086	inode = file_inode(file);
2087
2088	mutex_lock(&adpt_mutex);
2089	ret = adpt_ioctl(inode, file, cmd, arg);
2090	mutex_unlock(&adpt_mutex);
2091
2092 return ret;
2093}
2094
2095#ifdef CONFIG_COMPAT
2096static long compat_adpt_ioctl(struct file *file,
2097 unsigned int cmd, unsigned long arg)
2098{
2099 struct inode *inode;
2100 long ret;
2101
2102	inode = file_inode(file);
2103
2104	mutex_lock(&adpt_mutex);
2105
2106 switch(cmd) {
2107 case DPT_SIGNATURE:
2108 case I2OUSRCMD:
2109 case DPT_CTRLINFO:
2110 case DPT_SYSINFO:
2111 case DPT_BLINKLED:
2112 case I2ORESETCMD:
2113 case I2ORESCANCMD:
2114 case (DPT_TARGET_BUSY & 0xFFFF):
2115 case DPT_TARGET_BUSY:
2116 ret = adpt_ioctl(inode, file, cmd, arg);
2117 break;
2118 default:
2119 ret = -ENOIOCTLCMD;
2120 }
2121
2122	mutex_unlock(&adpt_mutex);
2123
2124 return ret;
2125}
2126#endif
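/*
 * Interrupt handler: drain the outbound reply FIFO.  IOCTL replies
 * (context bit 0x40000000) are copied to the waiter's buffer, post-wait
 * messages are completed, and normal SCSI replies are handed to
 * adpt_i2o_to_scsi().  Each MFA is written back to the reply port to
 * return the frame to the free list.
 */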
2127
2128static irqreturn_t adpt_isr(int irq, void *dev_id)
2129{
2130 struct scsi_cmnd* cmd;
2131 adpt_hba* pHba = dev_id;
2132 u32 m;
2133	void __iomem *reply;
2134	u32 status=0;
2135 u32 context;
2136 ulong flags = 0;
2137 int handled = 0;
2138
2139 if (pHba == NULL){
2140 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2141 return IRQ_NONE;
2142 }
2143 if(pHba->host)
2144 spin_lock_irqsave(pHba->host->host_lock, flags);
2145
2146 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2147 m = readl(pHba->reply_port);
2148 if(m == EMPTY_QUEUE){
2149 // Try twice then give up
2150 rmb();
2151 m = readl(pHba->reply_port);
2152 if(m == EMPTY_QUEUE){
2153 // This really should not happen
2154 printk(KERN_ERR"dpti: Could not get reply frame\n");
2155 goto out;
2156 }
2157 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002158 if (pHba->reply_pool_pa <= m &&
2159 m < pHba->reply_pool_pa +
2160 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2161 reply = (u8 *)pHba->reply_pool +
2162 (m - pHba->reply_pool_pa);
2163 } else {
2164 /* Ick, we should *never* be here */
2165 printk(KERN_ERR "dpti: reply frame not from pool\n");
2166 reply = (u8 *)bus_to_virt(m);
2167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169 if (readl(reply) & MSG_FAIL) {
2170 u32 old_m = readl(reply+28);
2171			void __iomem *msg;
2172			u32 old_context;
2173 PDEBUG("%s: Failed message\n",pHba->name);
2174 if(old_m >= 0x100000){
2175 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2176 writel(m,pHba->reply_port);
2177 continue;
2178 }
2179 // Transaction context is 0 in failed reply frame
2180			msg = pHba->msg_addr_virt + old_m;
2181			old_context = readl(msg+12);
2182 writel(old_context, reply+12);
2183 adpt_send_nop(pHba, old_m);
2184 }
2185 context = readl(reply+8);
2186 if(context & 0x40000000){ // IOCTL
2187			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2188			if( p != NULL) {
2189				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2190			}
2191 // All IOCTLs will also be post wait
2192 }
2193 if(context & 0x80000000){ // Post wait message
2194 status = readl(reply+16);
2195 if(status >> 24){
2196 status &= 0xffff; /* Get detail status */
2197 } else {
2198 status = I2O_POST_WAIT_OK;
2199 }
2200 if(!(context & 0x40000000)) {
2201				cmd = adpt_cmd_from_context(pHba,
2202							readl(reply+12));
2203				if(cmd != NULL) {
2204 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2205 }
2206 }
2207 adpt_i2o_post_wait_complete(context, status);
2208 } else { // SCSI message
2209			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2210			if(cmd != NULL){
2211				scsi_dma_unmap(cmd);
2212				if(cmd->serial_number != 0) { // If not timedout
2213 adpt_i2o_to_scsi(reply, cmd);
2214 }
2215 }
2216 }
2217 writel(m, pHba->reply_port);
2218 wmb();
2219 rmb();
2220 }
2221 handled = 1;
2222out: if(pHba->host)
2223 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2224 return IRQ_RETVAL(handled);
2225}
2226
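/*
 * Build and post a private I2O_CMD_SCSI_EXEC message for one SCSI
 * command: CDB, direction flags and a simple SG list, using 64-bit
 * SG addresses when the IOP supports them.
 */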
2227static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2228{
2229 int i;
2230 u32 msg[MAX_MESSAGE_SIZE];
2231 u32* mptr;
2232	u32* lptr;
2233	u32 *lenptr;
2234	int direction;
2235	int scsidir;
2236	int nseg;
2237	u32 len;
2238	u32 reqlen;
2239	s32 rcode;
2240	dma_addr_t addr;
2241
2242	memset(msg, 0 , sizeof(msg));
2243	len = scsi_bufflen(cmd);
2244	direction = 0x00000000;	
2245
2246 scsidir = 0x00000000; // DATA NO XFER
2247 if(len) {
2248 /*
2249 * Set SCBFlags to indicate if data is being transferred
2250 * in or out, or no data transfer
2251 * Note: Do not have to verify index is less than 0 since
2252 * cmd->cmnd[0] is an unsigned char
2253 */
2254 switch(cmd->sc_data_direction){
2255 case DMA_FROM_DEVICE:
2256 scsidir =0x40000000; // DATA IN (iop<--dev)
2257 break;
2258 case DMA_TO_DEVICE:
2259 direction=0x04000000; // SGL OUT
2260 scsidir =0x80000000; // DATA OUT (iop-->dev)
2261 break;
2262 case DMA_NONE:
2263 break;
2264 case DMA_BIDIRECTIONAL:
2265 scsidir =0x40000000; // DATA IN (iop<--dev)
2266 // Assume In - and continue;
2267 break;
2268 default:
2269 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2270 pHba->name, cmd->cmnd[0]);
2271 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2272 cmd->scsi_done(cmd);
2273 return 0;
2274 }
2275 }
2276 // msg[0] is set later
2277 // I2O_CMD_SCSI_EXEC
2278 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2279 msg[2] = 0;
2280	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2281	// Our cards use the transaction context as the tag for queueing
2282 // Adaptec/DPT Private stuff
2283 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2284 msg[5] = d->tid;
2285 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2286 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2287 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2288 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2289 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2290
2291 mptr=msg+7;
2292
2293 // Write SCSI command into the message - always 16 byte block
2294 memset(mptr, 0, 16);
2295 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2296 mptr+=4;
2297 lenptr=mptr++; /* Remember me - fill in when we know */
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002298 if (dpt_dma64(pHba)) {
2299 reqlen = 16; // SINGLE SGE
2300 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2301 *mptr++ = 1 << PAGE_SHIFT;
2302 } else {
2303 reqlen = 14; // SINGLE SGE
2304 }
2305	/* Now fill in the SGList and command */
2306
2307	nseg = scsi_dma_map(cmd);
2308 BUG_ON(nseg < 0);
2309 if (nseg) {
2310 struct scatterlist *sg;
2311
2312		len = 0;
2313		scsi_for_each_sg(cmd, sg, nseg, i) {
2314			lptr = mptr;
2315			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2316			len+=sg_dma_len(sg);
2317			addr = sg_dma_address(sg);
2318 *mptr++ = dma_low(addr);
2319 if (dpt_dma64(pHba))
2320 *mptr++ = dma_high(addr);
2321			/* Make this an end of list */
2322			if (i == nseg - 1)
2323				*lptr = direction|0xD0000000|sg_dma_len(sg);
2324		}
2325		reqlen = mptr - msg;
2326 *lenptr = len;
2327
2328 if(cmd->underflow && len != cmd->underflow){
2329 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2330 len, cmd->underflow);
2331 }
2332 } else {
2333		*lenptr = len = 0;
2334		reqlen = 12;
2335	}
2336
2337 /* Stick the headers on */
2338 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2339
2340	// Send it on its way
2341 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2342 if (rcode == 0) {
2343 return 0;
2344 }
2345 return rcode;
2346}
2347
2348
2349static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2350{
2351	struct Scsi_Host *host;
2352
2353	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2354	if (host == NULL) {
2355		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2356		return -1;
2357 }
2358 host->hostdata[0] = (unsigned long)pHba;
2359 pHba->host = host;
2360
2361 host->irq = pHba->pDev->irq;
2362 /* no IO ports, so don't have to set host->io_port and
2363 * host->n_io_port
2364 */
2365 host->io_port = 0;
2366 host->n_io_port = 0;
2367 /* see comments in scsi_host.h */
2368 host->max_id = 16;
2369 host->max_lun = 256;
2370 host->max_channel = pHba->top_scsi_channel + 1;
2371 host->cmd_per_lun = 1;
2372	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2373	host->sg_tablesize = pHba->sg_tablesize;
2374 host->can_queue = pHba->post_fifo_size;
2375
2376 return 0;
2377}
2378
2379
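/*
 * Translate an I2O reply frame into a SCSI result: set the residual,
 * map the detailed status to a DID_* code, copy back sense data on a
 * CHECK CONDITION, then complete the command via scsi_done().
 */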
2380static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2381{
2382 adpt_hba* pHba;
2383 u32 hba_status;
2384 u32 dev_status;
2385 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2386 // I know this would look cleaner if I just read bytes
2387 // but the model I have been using for all the rest of the
2388 // io is in 4 byte words - so I keep that model
2389 u16 detailed_status = readl(reply+16) &0xffff;
2390 dev_status = (detailed_status & 0xff);
2391 hba_status = detailed_status >> 8;
2392
2393 // calculate resid for sg
Miquel van Smoorenburgdf81d232008-11-05 00:09:12 +01002394 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395
2396 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2397
2398 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2399
2400 if(!(reply_flags & MSG_FAIL)) {
2401 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2402 case I2O_SCSI_DSC_SUCCESS:
2403 cmd->result = (DID_OK << 16);
2404 // handle underflow
2405			if (readl(reply+20) < cmd->underflow) {
2406				cmd->result = (DID_ERROR <<16);
2407 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2408 }
2409 break;
2410 case I2O_SCSI_DSC_REQUEST_ABORTED:
2411 cmd->result = (DID_ABORT << 16);
2412 break;
2413 case I2O_SCSI_DSC_PATH_INVALID:
2414 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2415 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2416 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2417 case I2O_SCSI_DSC_NO_ADAPTER:
2418 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2419 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2420 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2421 cmd->result = (DID_TIME_OUT << 16);
2422 break;
2423 case I2O_SCSI_DSC_ADAPTER_BUSY:
2424 case I2O_SCSI_DSC_BUS_BUSY:
2425 cmd->result = (DID_BUS_BUSY << 16);
2426 break;
2427 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2428 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2429 cmd->result = (DID_RESET << 16);
2430 break;
2431 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2432 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2433 cmd->result = (DID_PARITY << 16);
2434 break;
2435 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2436 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2437 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2438 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2439 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2440 case I2O_SCSI_DSC_DATA_OVERRUN:
2441 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2442 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2443 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2444 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2445 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2446 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2447 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2448 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2449 case I2O_SCSI_DSC_INVALID_CDB:
2450 case I2O_SCSI_DSC_LUN_INVALID:
2451 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2452 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2453 case I2O_SCSI_DSC_NO_NEXUS:
2454 case I2O_SCSI_DSC_CDB_RECEIVED:
2455 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2456 case I2O_SCSI_DSC_QUEUE_FROZEN:
2457 case I2O_SCSI_DSC_REQUEST_INVALID:
2458 default:
2459 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2460 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2461 hba_status, dev_status, cmd->cmnd[0]);
2462 cmd->result = (DID_ERROR << 16);
2463 break;
2464 }
2465
2466 // copy over the request sense data if it was a check
2467 // condition status
2468		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2469			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2470			// Copy over the sense data
2471			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2472			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && 
2473 cmd->sense_buffer[2] == DATA_PROTECT ){
2474 /* This is to handle an array failed */
2475 cmd->result = (DID_TIME_OUT << 16);
2476 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2477 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2478 hba_status, dev_status, cmd->cmnd[0]);
2479
2480 }
2481 }
2482 } else {
2483		/* In this condition we could not talk to the tid -
2484		 * the card rejected it.  We should signal a retry
2485		 * for a limited number of retries.
2486 */
2487 cmd->result = (DID_TIME_OUT << 16);
2488 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2489 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2490 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2491 }
2492
2493 cmd->result |= (dev_status);
2494
2495 if(cmd->scsi_done != NULL){
2496 cmd->scsi_done(cmd);
2497 }
2498 return cmd->result;
2499}
2500
2501
2502static s32 adpt_rescan(adpt_hba* pHba)
2503{
2504 s32 rcode;
2505 ulong flags = 0;
2506
2507 if(pHba->host)
2508 spin_lock_irqsave(pHba->host->host_lock, flags);
2509 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2510 goto out;
2511 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2512 goto out;
2513 rcode = 0;
2514out: if(pHba->host)
2515 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2516 return rcode;
2517}
2518
2519
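/*
 * Re-walk the LCT after a rescan: mark every known device unscanned,
 * add any new storage-class entries, bring devices that reappeared back
 * online, and offline anything no longer present in the table.
 */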
2520static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2521{
2522 int i;
2523 int max;
2524 int tid;
2525 struct i2o_device *d;
2526 i2o_lct *lct = pHba->lct;
2527 u8 bus_no = 0;
2528 s16 scsi_id;
2529 s16 scsi_lun;
2530 u32 buf[10]; // at least 8 u32's
2531 struct adpt_device* pDev = NULL;
2532 struct i2o_device* pI2o_dev = NULL;
2533
2534 if (lct == NULL) {
2535 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2536 return -1;
2537 }
2538
2539 max = lct->table_size;
2540 max -= 3;
2541 max /= 9;
2542
2543 // Mark each drive as unscanned
2544 for (d = pHba->devices; d; d = d->next) {
2545 pDev =(struct adpt_device*) d->owner;
2546 if(!pDev){
2547 continue;
2548 }
2549 pDev->state |= DPTI_DEV_UNSCANNED;
2550 }
2551
2552 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2553
2554 for(i=0;i<max;i++) {
2555 if( lct->lct_entry[i].user_tid != 0xfff){
2556 continue;
2557 }
2558
2559 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2560 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2561 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2562 tid = lct->lct_entry[i].tid;
2563 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2564 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2565 continue;
2566 }
2567 bus_no = buf[0]>>16;
2568			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2569				printk(KERN_WARNING
2570					"%s: Channel number %d out of range\n",
2571					pHba->name, bus_no);
2572				continue;
2573			}
2574
2575			scsi_id = buf[1];
2576 scsi_lun = (buf[2]>>8 )&0xff;
2577 pDev = pHba->channel[bus_no].device[scsi_id];
2578			/* walk the LUN chain looking for this LUN */
2579 while(pDev) {
2580 if(pDev->scsi_lun == scsi_lun) {
2581 break;
2582 }
2583 pDev = pDev->next_lun;
2584 }
2585 if(!pDev ) { // Something new add it
2586				d = kmalloc(sizeof(struct i2o_device),
2587					    GFP_ATOMIC);
2588				if(d==NULL)
2589 {
2590 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2591 return -ENOMEM;
2592 }
2593
2594				d->controller = pHba;
2595				d->next = NULL;
2596
2597 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2598
2599 d->flags = 0;
2600 adpt_i2o_report_hba_unit(pHba, d);
2601 adpt_i2o_install_device(pHba, d);
2602
2603				pDev = pHba->channel[bus_no].device[scsi_id];	
2604				if( pDev == NULL){
2605					pDev =
2606					  kzalloc(sizeof(struct adpt_device),
2607						  GFP_ATOMIC);
2608					if(pDev == NULL) {
2609 return -ENOMEM;
2610 }
2611 pHba->channel[bus_no].device[scsi_id] = pDev;
2612 } else {
2613 while (pDev->next_lun) {
2614 pDev = pDev->next_lun;
2615 }
2616					pDev = pDev->next_lun =
2617					  kzalloc(sizeof(struct adpt_device),
2618						  GFP_ATOMIC);
2619					if(pDev == NULL) {
2620 return -ENOMEM;
2621 }
2622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 pDev->tid = d->lct_data.tid;
2624 pDev->scsi_channel = bus_no;
2625 pDev->scsi_id = scsi_id;
2626 pDev->scsi_lun = scsi_lun;
2627 pDev->pI2o_dev = d;
2628 d->owner = pDev;
2629 pDev->type = (buf[0])&0xff;
2630 pDev->flags = (buf[0]>>8)&0xff;
2631				// Too late, the SCSI midlayer has already made up its mind, but update anyway
2632 if(scsi_id > pHba->top_scsi_id){
2633 pHba->top_scsi_id = scsi_id;
2634 }
2635 if(scsi_lun > pHba->top_scsi_lun){
2636 pHba->top_scsi_lun = scsi_lun;
2637 }
2638 continue;
2639 } // end of new i2o device
2640
2641 // We found an old device - check it
2642 while(pDev) {
2643 if(pDev->scsi_lun == scsi_lun) {
2644 if(!scsi_device_online(pDev->pScsi_dev)) {
2645 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2646 pHba->name,bus_no,scsi_id,scsi_lun);
2647 if (pDev->pScsi_dev) {
2648 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2649 }
2650 }
2651 d = pDev->pI2o_dev;
2652 if(d->lct_data.tid != tid) { // something changed
2653 pDev->tid = tid;
2654 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2655 if (pDev->pScsi_dev) {
2656 pDev->pScsi_dev->changed = TRUE;
2657 pDev->pScsi_dev->removable = TRUE;
2658 }
2659 }
2660 // Found it - mark it scanned
2661 pDev->state = DPTI_DEV_ONLINE;
2662 break;
2663 }
2664 pDev = pDev->next_lun;
2665 }
2666 }
2667 }
2668 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2669 pDev =(struct adpt_device*) pI2o_dev->owner;
2670 if(!pDev){
2671 continue;
2672 }
2673 // Drive offline drives that previously existed but could not be found
2674 // in the LCT table
2675 if (pDev->state & DPTI_DEV_UNSCANNED){
2676 pDev->state = DPTI_DEV_OFFLINE;
2677 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2678 if (pDev->pScsi_dev) {
2679 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2680 }
2681 }
2682 }
2683 return 0;
2684}
2685
2686static void adpt_fail_posted_scbs(adpt_hba* pHba)
2687{
2688 struct scsi_cmnd* cmd = NULL;
2689 struct scsi_device* d = NULL;
2690
2691 shost_for_each_device(d, pHba->host) {
2692 unsigned long flags;
2693 spin_lock_irqsave(&d->list_lock, flags);
2694 list_for_each_entry(cmd, &d->cmd_list, list) {
2695 if(cmd->serial_number == 0){
2696 continue;
2697 }
2698 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2699 cmd->scsi_done(cmd);
2700 }
2701 spin_unlock_irqrestore(&d->list_lock, flags);
2702 }
2703}
2704
2705
2706/*============================================================================
2707 * Routines from i2o subsystem
2708 *============================================================================
2709 */
2710
2711
2712
2713/*
2714 * Bring an I2O controller into HOLD state. See the spec.
2715 */
2716static int adpt_i2o_activate_hba(adpt_hba* pHba)
2717{
2718 int rcode;
2719
2720 if(pHba->initialized ) {
2721 if (adpt_i2o_status_get(pHba) < 0) {
2722 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2723 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2724 return rcode;
2725 }
2726 if (adpt_i2o_status_get(pHba) < 0) {
2727 printk(KERN_INFO "HBA not responding.\n");
2728 return -1;
2729 }
2730 }
2731
2732 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2733 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2734 return -1;
2735 }
2736
2737 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2738 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2739 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2740 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2741 adpt_i2o_reset_hba(pHba);
2742 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2743 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2744 return -1;
2745 }
2746 }
2747 } else {
2748 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2749 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2750 return rcode;
2751 }
2752
2753 }
2754
2755 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2756 return -1;
2757 }
2758
2759 /* In HOLD state */
2760
2761 if (adpt_i2o_hrt_get(pHba) < 0) {
2762 return -1;
2763 }
2764
2765 return 0;
2766}
2767
2768/*
2769 * Bring a controller online into OPERATIONAL state.
2770 */
2771
2772static int adpt_i2o_online_hba(adpt_hba* pHba)
2773{
2774 if (adpt_i2o_systab_send(pHba) < 0) {
2775 adpt_i2o_delete_hba(pHba);
2776 return -1;
2777 }
2778 /* In READY state */
2779
2780 if (adpt_i2o_enable_hba(pHba) < 0) {
2781 adpt_i2o_delete_hba(pHba);
2782 return -1;
2783 }
2784
2785 /* In OPERATIONAL state */
2786 return 0;
2787}
2788
2789static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2790{
2791 u32 __iomem *msg;
2792 ulong timeout = jiffies + 5*HZ;
2793
2794 while(m == EMPTY_QUEUE){
2795 rmb();
2796 m = readl(pHba->post_port);
2797 if(m != EMPTY_QUEUE){
2798 break;
2799 }
2800 if(time_after(jiffies,timeout)){
2801 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2802 return 2;
2803 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002804 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 }
2806 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2807 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2808 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2809 writel( 0,&msg[2]);
2810 wmb();
2811
2812 writel(m, pHba->post_port);
2813 wmb();
2814 return 0;
2815}
2816
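/*
 * I2O_CMD_OUTBOUND_INIT: describe our reply FIFO to the IOP, poll the
 * 4-byte status until the init completes, then (re)allocate the coherent
 * reply pool and prime the reply port with one MFA per reply frame.
 */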
2817static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2818{
2819 u8 *status;
2820	dma_addr_t addr;
2821	u32 __iomem *msg = NULL;
2822	int i;
2823	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2824	u32 m;
2825
2826 do {
2827 rmb();
2828 m = readl(pHba->post_port);
2829 if (m != EMPTY_QUEUE) {
2830 break;
2831 }
2832
2833 if(time_after(jiffies,timeout)){
2834 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2835 return -ETIMEDOUT;
2836 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002837 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 } while(m == EMPTY_QUEUE);
2839
2840 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2841
2842	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2843	if (!status) {
2844		adpt_send_nop(pHba, m);
2845 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2846 pHba->name);
2847 return -ENOMEM;
2848 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002849 memset(status, 0, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850
2851 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2852 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2853 writel(0, &msg[2]);
2854 writel(0x0106, &msg[3]); /* Transaction context */
2855 writel(4096, &msg[4]); /* Host page frame size */
2856 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2857 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002858 writel((u32)addr, &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
2860 writel(m, pHba->post_port);
2861 wmb();
2862
2863 // Wait for the reply status to come back
2864 do {
2865 if (*status) {
2866 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2867 break;
2868 }
2869 }
2870 rmb();
2871 if(time_after(jiffies,timeout)){
2872 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2873			/* We lose 4 bytes of "status" here, but we
2874			   cannot free them because the controller may
2875			   wake up and corrupt them at any time */
2876			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2877			return -ETIMEDOUT;
2878 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002879 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 } while (1);
2881
2882 // If the command was successful, fill the fifo with our reply
2883 // message packets
2884 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2885		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2886		return -2;
2887	}
2888	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2889
2890	if(pHba->reply_pool != NULL) {
2891 dma_free_coherent(&pHba->pDev->dev,
2892 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2893 pHba->reply_pool, pHba->reply_pool_pa);
2894 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
2896	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2897				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2898				&pHba->reply_pool_pa, GFP_KERNEL);
2899	if (!pHba->reply_pool) {
2900 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2901 return -ENOMEM;
2902	}
2903	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2904
2905	for(i = 0; i < pHba->reply_fifo_size; i++) {
2906		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2907			pHba->reply_port);
2908		wmb();
2909	}
2910 adpt_i2o_status_get(pHba);
2911 return 0;
2912}
2913
2914
2915/*
2916 * I2O System Table. Contains information about
2917 * all the IOPs in the system. Used to inform IOPs
2918 * about each other's existence.
2919 *
2920 * sys_tbl_ver is the CurrentChangeIndicator that is
2921 * used by IOPs to track changes.
2922 */
2923
2924
2925
2926static s32 adpt_i2o_status_get(adpt_hba* pHba)
2927{
2928 ulong timeout;
2929 u32 m;
2930 u32 __iomem *msg;
2931 u8 *status_block=NULL;
2932
2933	if(pHba->status_block == NULL) {
2934		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2935					sizeof(i2o_status_block),
2936					&pHba->status_block_pa, GFP_KERNEL);
2937		if(pHba->status_block == NULL) {
2938 printk(KERN_ERR
2939 "dpti%d: Get Status Block failed; Out of memory. \n",
2940 pHba->unit);
2941 return -ENOMEM;
2942 }
2943 }
2944 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2945 status_block = (u8*)(pHba->status_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2947 do {
2948 rmb();
2949 m = readl(pHba->post_port);
2950 if (m != EMPTY_QUEUE) {
2951 break;
2952 }
2953 if(time_after(jiffies,timeout)){
2954 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2955 pHba->name);
2956 return -ETIMEDOUT;
2957 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002958 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 } while(m==EMPTY_QUEUE);
2960
2961
2962 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2963
2964 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2965 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2966 writel(1, &msg[2]);
2967 writel(0, &msg[3]);
2968 writel(0, &msg[4]);
2969 writel(0, &msg[5]);
2970	writel( dma_low(pHba->status_block_pa), &msg[6]);
2971	writel( dma_high(pHba->status_block_pa), &msg[7]);
2972	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2973
2974 //post message
2975 writel(m, pHba->post_port);
2976 wmb();
2977
2978 while(status_block[87]!=0xff){
2979 if(time_after(jiffies,timeout)){
2980 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2981 pHba->unit);
2982 return -ETIMEDOUT;
2983 }
2984 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002985 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 }
2987
2988 // Set up our number of outbound and inbound messages
2989 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2990 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2991 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2992 }
2993
2994 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2995 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2996 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2997 }
2998
2999 // Calculate the Scatter Gather list size
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003000 if (dpt_dma64(pHba)) {
3001 pHba->sg_tablesize
3002 = ((pHba->status_block->inbound_frame_size * 4
3003 - 14 * sizeof(u32))
3004 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3005 } else {
3006 pHba->sg_tablesize
3007 = ((pHba->status_block->inbound_frame_size * 4
3008 - 12 * sizeof(u32))
3009 / sizeof(struct sg_simple_element));
3010 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3012 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3013 }
3014
3015
3016#ifdef DEBUG
3017 printk("dpti%d: State = ",pHba->unit);
3018 switch(pHba->status_block->iop_state) {
3019 case 0x01:
3020 printk("INIT\n");
3021 break;
3022 case 0x02:
3023 printk("RESET\n");
3024 break;
3025 case 0x04:
3026 printk("HOLD\n");
3027 break;
3028 case 0x05:
3029 printk("READY\n");
3030 break;
3031 case 0x08:
3032 printk("OPERATIONAL\n");
3033 break;
3034 case 0x10:
3035 printk("FAILED\n");
3036 break;
3037 case 0x11:
3038 printk("FAULTED\n");
3039 break;
3040 default:
3041 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3042 }
3043#endif
3044 return 0;
3045}
3046
3047/*
3048 * Get the IOP's Logical Configuration Table
3049 */
3050static int adpt_i2o_lct_get(adpt_hba* pHba)
3051{
3052 u32 msg[8];
3053 int ret;
3054 u32 buf[16];
3055
3056 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3057 pHba->lct_size = pHba->status_block->expected_lct_size;
3058 }
3059 do {
3060 if (pHba->lct == NULL) {
3061			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3062					pHba->lct_size, &pHba->lct_pa,
3063					GFP_ATOMIC);
3064			if(pHba->lct == NULL) {
3065 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3066 pHba->name);
3067 return -ENOMEM;
3068 }
3069 }
3070 memset(pHba->lct, 0, pHba->lct_size);
3071
3072 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3073 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3074 msg[2] = 0;
3075 msg[3] = 0;
3076 msg[4] = 0xFFFFFFFF; /* All devices */
3077 msg[5] = 0x00000000; /* Report now */
3078 msg[6] = 0xD0000000|pHba->lct_size;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003079 msg[7] = (u32)pHba->lct_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080
3081 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3082			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n", 
3083 pHba->name, ret);
3084 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3085 return ret;
3086 }
3087
3088 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3089 pHba->lct_size = pHba->lct->table_size << 2;
3090			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3091					pHba->lct, pHba->lct_pa);
3092			pHba->lct = NULL;
3093 }
3094 } while (pHba->lct == NULL);
3095
3096 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3097
3098
3099 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3100 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3101 pHba->FwDebugBufferSize = buf[1];
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003102 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3103 pHba->FwDebugBufferSize);
3104 if (pHba->FwDebugBuffer_P) {
3105 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3106 FW_DEBUG_FLAGS_OFFSET;
3107 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3108 FW_DEBUG_BLED_OFFSET;
3109 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3110 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3111 FW_DEBUG_STR_LENGTH_OFFSET;
3112 pHba->FwDebugBuffer_P += buf[2];
3113 pHba->FwDebugFlags = 0;
3114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 }
3116
3117 return 0;
3118}
3119
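/*
 * Rebuild the global I2O system table from the current HBA chain.  Each
 * entry carries the IOP's state and frame size plus its inbound message
 * address (base_addr_phys + 0x40, the post port in this driver's
 * register layout).
 */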
3120static int adpt_i2o_build_sys_table(void)
3121{
3122	adpt_hba* pHba = hba_chain;
3123	int count = 0;
3124
3125	if (sys_tbl)
3126		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3127					sys_tbl, sys_tbl_pa);
3128
3129	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3130 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3131
3132	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3133				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3134	if (!sys_tbl) {
3135		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
3136 return -ENOMEM;
3137 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003138 memset(sys_tbl, 0, sys_tbl_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139
3140 sys_tbl->num_entries = hba_count;
3141 sys_tbl->version = I2OVERSION;
3142 sys_tbl->change_ind = sys_tbl_ind++;
3143
3144 for(pHba = hba_chain; pHba; pHba = pHba->next) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003145 u64 addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146 // Get updated Status Block so we have the latest information
3147 if (adpt_i2o_status_get(pHba)) {
3148 sys_tbl->num_entries--;
3149 continue; // try next one
3150 }
3151
3152 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3153 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3154 sys_tbl->iops[count].seg_num = 0;
3155 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3156 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3157 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3158 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3159 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3160 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003161 addr = pHba->base_addr_phys + 0x40;
3162 sys_tbl->iops[count].inbound_low = dma_low(addr);
3163 sys_tbl->iops[count].inbound_high = dma_high(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164
3165 count++;
3166 }
3167
3168#ifdef DEBUG
3169{
3170 u32 *table = (u32*)sys_tbl;
3171 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3172 for(count = 0; count < (sys_tbl_len >>2); count++) {
3173 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3174 count, table[count]);
3175 }
3176}
3177#endif
3178
3179 return 0;
3180}
3181
3182
/*
 *	Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba *pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0) {
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if (d->lct_data.device_flags & (1 << 0))
		printk("C");	// ConfigDialog requested
	if (d->lct_data.device_flags & (1 << 1))
		printk("U");	// Multi-user capable
	if (!(d->lct_data.device_flags & (1 << 4)))
		printk("P");	// Peer service enabled!
	if (!(d->lct_data.device_flags & (1 << 5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 *	Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch (class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


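/*
 * Read the adapter's Hardware Resource Table (HRT).  If the firmware
 * reports a table larger than the buffer offered, the DMA buffer is
 * reallocated at the reported size and the request is retried.
 */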
static s32 adpt_i2o_hrt_get(adpt_hba *pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);	/* Simple transaction */
		msg[5] = (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
					pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while (pHba->hrt == NULL);
	return 0;
}

/*
 *	 Query one scalar group value or a whole scalar group.
 */
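/*
 * The operation block below follows the I2O UtilParamsGet layout:
 * { operation count, reserved, operation code, group number,
 *   field count, field index }.  A field count of -1 (set when the
 * caller passes field == -1) asks for every field in the group.
 */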
static int adpt_i2o_query_scalar(adpt_hba *pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8) * (8 + buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
				resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
				resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va + 8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}


/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
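/*
 * As decoded in the BlockStatus check below, word 1 of that header packs
 * ErrorInfoSize (bits 31:24), BlockStatus (bits 23:16) and the BlockSize
 * in 32-bit words (bits 15:0); word 0 presumably carries the ResultCount
 * mentioned above.
 */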
static int adpt_i2o_issue_params(int cmd, adpt_hba *pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1] & 0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}


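/*
 * Ask the IOP to stop servicing external requests (ExecSysQuiesce).
 * The message is only posted when the adapter is in READY or
 * OPERATIONAL state, since the firmware discards it otherwise.
 */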
static s32 adpt_i2o_quiesce_hba(adpt_hba *pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if ((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	    (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba *pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if (!pHba->status_block) {
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if (pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING "%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


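/*
 * Push the system table built by adpt_i2o_build_sys_table() to the IOP
 * (ExecSysTabSet).  The private memory and private I/O space SGL
 * elements are supplied with zero length; only the SysTab is set.
 */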
static int adpt_i2o_systab_send(adpt_hba *pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0 << 16) | ((pHba->unit + 2) << 12);	/* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;					/* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

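/*
 * SCSI host template shared by every adapter this driver registers; the
 * entry points it names (adpt_queue, adpt_abort, ...) are defined
 * earlier in this file.
 */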
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

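/*
 * Module entry point: detect every DPT I2O controller, then register a
 * Scsi_Host for each one and scan it.  If any registration fails, the
 * hosts on hba_chain are removed again before the error is returned.
 */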
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");