Linus Torvalds1da177e2005-04-16 15:20:36 -07001/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
 26 - use schedule_timeout in long loops
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37////////////////////////////////////////////////////////////////
38
39#include <linux/ioctl.h> /* For SCSI-Passthrough */
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h> /* for kmalloc() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <linux/pci.h> /* for PCI support */
45#include <linux/proc_fs.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h> /* for udelay */
48#include <linux/interrupt.h>
49#include <linux/kernel.h> /* for printk */
50#include <linux/sched.h>
51#include <linux/reboot.h>
52#include <linux/spinlock.h>
Matthias Gehre910638a2006-03-28 01:56:48 -080053#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55#include <linux/timer.h>
56#include <linux/string.h>
57#include <linux/ioport.h>
Arjan van de Ven0b950672006-01-11 13:16:10 +010058#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
60#include <asm/processor.h> /* for boot_cpu_data */
61#include <asm/pgtable.h>
62#include <asm/io.h> /* for virt_to_bus, etc. */
63
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68#include <scsi/scsi_tcq.h>
69
70#include "dpt/dptsig.h"
71#include "dpti.h"
72
73/*============================================================================
74 * Create a binary signature - this is read by dptsig
75 * Needed for our management apps
76 *============================================================================
77 */
78static dpt_sig_S DPTI_sig = {
79 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
80#ifdef __i386__
81 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
82#elif defined(__ia64__)
83 PROC_INTEL, PROC_IA64,
84#elif defined(__sparc__)
85 PROC_ULTRASPARC, PROC_ULTRASPARC,
86#elif defined(__alpha__)
87 PROC_ALPHA, PROC_ALPHA,
88#else
89 (-1),(-1),
90#endif
91 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
92 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
93 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
94};
95
96
97
98
99/*============================================================================
100 * Globals
101 *============================================================================
102 */
103
Arjan van de Ven0b950672006-01-11 13:16:10 +0100104static DEFINE_MUTEX(adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200106static struct i2o_sys_tbl *sys_tbl;
107static dma_addr_t sys_tbl_pa;
108static int sys_tbl_ind;
109static int sys_tbl_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111static adpt_hba* hba_chain = NULL;
112static int hba_count = 0;
113
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200114static struct class *adpt_sysfs_class;
115
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200116#ifdef CONFIG_COMPAT
117static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
118#endif
119
Arjan van de Ven00977a52007-02-12 00:55:34 -0800120static const struct file_operations adpt_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 .ioctl = adpt_ioctl,
122 .open = adpt_open,
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200123 .release = adpt_close,
124#ifdef CONFIG_COMPAT
125 .compat_ioctl = compat_adpt_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128
129/* Structures and definitions for synchronous message posting.
130 * See adpt_i2o_post_wait() for description
131 * */
132struct adpt_i2o_post_wait_data
133{
134 int status;
135 u32 id;
136 adpt_wait_queue_head_t *wq;
137 struct adpt_i2o_post_wait_data *next;
138};
139
140static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
141static u32 adpt_post_wait_id = 0;
142static DEFINE_SPINLOCK(adpt_post_wait_lock);
143
144
145/*============================================================================
146 * Functions
147 *============================================================================
148 */
149
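/*
 * Small helpers used when building message frames: dpt_dma64() reports
 * whether the HBA was brought up with 64-bit DMA enabled, and
 * dma_high()/dma_low() split a dma_addr_t into the two 32-bit halves
 * that get written into SG elements.
 */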
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200150static inline int dpt_dma64(adpt_hba *pHba)
151{
152 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153}
154
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200155static inline u32 dma_high(dma_addr_t addr)
156{
157 return upper_32_bits(addr);
158}
159
160static inline u32 dma_low(dma_addr_t addr)
161{
162 return (u32)addr;
163}
164
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165static u8 adpt_read_blink_led(adpt_hba* host)
166{
Harvey Harrison172c1222008-04-28 16:50:03 -0700167 if (host->FwDebugBLEDflag_P) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
169 return readb(host->FwDebugBLEDvalue_P);
170 }
171 }
172 return 0;
173}
174
175/*============================================================================
176 * Scsi host template interface functions
177 *============================================================================
178 */
179
180static struct pci_device_id dptids[] = {
181 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
182 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
183 { 0, }
184};
185MODULE_DEVICE_TABLE(pci,dptids);
186
Andrew Morton24601bb2007-12-10 15:49:20 -0800187static int adpt_detect(struct scsi_host_template* sht)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188{
189 struct pci_dev *pDev = NULL;
190 adpt_hba* pHba;
191
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 PINFO("Detecting Adaptec I2O RAID controllers...\n");
193
 194 /* search for all Adaptec I2O RAID cards */
Alan Coxa07f3532006-09-15 15:34:32 +0100195 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 if(pDev->device == PCI_DPT_DEVICE_ID ||
197 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
Andrew Morton24601bb2007-12-10 15:49:20 -0800198 if(adpt_install_hba(sht, pDev) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199 PERROR("Could not Init an I2O RAID device\n");
200 PERROR("Will not try to detect others.\n");
201 return hba_count-1;
202 }
Alan Coxa07f3532006-09-15 15:34:32 +0100203 pci_dev_get(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 }
205 }
206
207 /* In INIT state, Activate IOPs */
208 for (pHba = hba_chain; pHba; pHba = pHba->next) {
 209 // Activate does get status, init outbound, and get hrt
210 if (adpt_i2o_activate_hba(pHba) < 0) {
211 adpt_i2o_delete_hba(pHba);
212 }
213 }
214
215
216 /* Active IOPs in HOLD state */
217
218rebuild_sys_tab:
219 if (hba_chain == NULL)
220 return 0;
221
222 /*
223 * If build_sys_table fails, we kill everything and bail
224 * as we can't init the IOPs w/o a system table
225 */
226 if (adpt_i2o_build_sys_table() < 0) {
227 adpt_i2o_sys_shutdown();
228 return 0;
229 }
230
231 PDEBUG("HBA's in HOLD state\n");
232
 233 /* If an IOP doesn't come online, we need to rebuild the System table */
234 for (pHba = hba_chain; pHba; pHba = pHba->next) {
235 if (adpt_i2o_online_hba(pHba) < 0) {
236 adpt_i2o_delete_hba(pHba);
237 goto rebuild_sys_tab;
238 }
239 }
240
241 /* Active IOPs now in OPERATIONAL state */
242 PDEBUG("HBA's in OPERATIONAL state\n");
243
244 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
245 for (pHba = hba_chain; pHba; pHba = pHba->next) {
246 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
247 if (adpt_i2o_lct_get(pHba) < 0){
248 adpt_i2o_delete_hba(pHba);
249 continue;
250 }
251
252 if (adpt_i2o_parse_lct(pHba) < 0){
253 adpt_i2o_delete_hba(pHba);
254 continue;
255 }
256 adpt_inquiry(pHba);
257 }
258
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200259 adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
260 if (IS_ERR(adpt_sysfs_class)) {
261 printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
262 adpt_sysfs_class = NULL;
263 }
264
Linus Torvalds1da177e2005-04-16 15:20:36 -0700265 for (pHba = hba_chain; pHba; pHba = pHba->next) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +0200266 if (adpt_scsi_host_alloc(pHba, sht) < 0){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267 adpt_i2o_delete_hba(pHba);
268 continue;
269 }
270 pHba->initialized = TRUE;
271 pHba->state &= ~DPTI_STATE_RESET;
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200272 if (adpt_sysfs_class) {
273 struct device *dev = device_create(adpt_sysfs_class,
274 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
275 "dpti%d", pHba->unit);
276 if (IS_ERR(dev)) {
277 printk(KERN_WARNING"dpti%d: unable to "
278 "create device in dpt_i2o class\n",
279 pHba->unit);
280 }
281 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 }
283
284 // Register our control device node
285 // nodes will need to be created in /dev to access this
 286 // the nodes cannot be created from within the driver
287 if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
Andrew Morton24601bb2007-12-10 15:49:20 -0800288 adpt_i2o_sys_shutdown();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 return 0;
290 }
291 return hba_count;
292}
293
294
Andrew Morton24601bb2007-12-10 15:49:20 -0800295/*
296 * scsi_unregister will be called AFTER we return.
297 */
298static int adpt_release(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299{
Andrew Morton24601bb2007-12-10 15:49:20 -0800300 adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301// adpt_i2o_quiesce_hba(pHba);
302 adpt_i2o_delete_hba(pHba);
Andrew Morton24601bb2007-12-10 15:49:20 -0800303 scsi_unregister(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304 return 0;
305}
306
307
308static void adpt_inquiry(adpt_hba* pHba)
309{
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200310 u32 msg[17];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 u32 *mptr;
312 u32 *lenptr;
313 int direction;
314 int scsidir;
315 u32 len;
316 u32 reqlen;
317 u8* buf;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200318 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319 u8 scb[16];
320 s32 rcode;
321
322 memset(msg, 0, sizeof(msg));
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200323 buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 if(!buf){
325 printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
326 return;
327 }
328 memset((void*)buf, 0, 36);
329
330 len = 36;
331 direction = 0x00000000;
332 scsidir =0x40000000; // DATA IN (iop<--dev)
333
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200334 if (dpt_dma64(pHba))
335 reqlen = 17; // SINGLE SGE, 64 bit
336 else
337 reqlen = 14; // SINGLE SGE, 32 bit
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 /* Stick the headers on */
339 msg[0] = reqlen<<16 | SGL_OFFSET_12;
340 msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
341 msg[2] = 0;
342 msg[3] = 0;
343 // Adaptec/DPT Private stuff
344 msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
345 msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
346 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
347 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
348 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
349 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
350 msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
351
352 mptr=msg+7;
353
354 memset(scb, 0, sizeof(scb));
355 // Write SCSI command into the message - always 16 byte block
356 scb[0] = INQUIRY;
357 scb[1] = 0;
358 scb[2] = 0;
359 scb[3] = 0;
360 scb[4] = 36;
361 scb[5] = 0;
362 // Don't care about the rest of scb
363
364 memcpy(mptr, scb, sizeof(scb));
365 mptr+=4;
366 lenptr=mptr++; /* Remember me - fill in when we know */
367
368 /* Now fill in the SGList and command */
369 *lenptr = len;
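	/*
	 * Build a single SG element for the inquiry buffer. The 0xD0000000
	 * constant appears to combine the I2O simple-SGE flags (last element,
	 * end of buffer) with the direction and length; on 64-bit capable HBAs
	 * a leading 0x7C attribute element first switches the SGL to 64-bit
	 * addressing (flag meanings assumed from the I2O spec).
	 */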
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200370 if (dpt_dma64(pHba)) {
371 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
372 *mptr++ = 1 << PAGE_SHIFT;
373 *mptr++ = 0xD0000000|direction|len;
374 *mptr++ = dma_low(addr);
375 *mptr++ = dma_high(addr);
376 } else {
377 *mptr++ = 0xD0000000|direction|len;
378 *mptr++ = addr;
379 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380
 381 // Send it on its way
382 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
383 if (rcode != 0) {
384 sprintf(pHba->detail, "Adaptec I2O RAID");
385 printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
386 if (rcode != -ETIME && rcode != -EINTR)
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200387 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 } else {
389 memset(pHba->detail, 0, sizeof(pHba->detail));
390 memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
391 memcpy(&(pHba->detail[16]), " Model: ", 8);
392 memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
393 memcpy(&(pHba->detail[40]), " FW: ", 4);
394 memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
395 pHba->detail[48] = '\0'; /* precautionary */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +0200396 dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 }
398 adpt_i2o_status_get(pHba);
399 return ;
400}
401
402
403static int adpt_slave_configure(struct scsi_device * device)
404{
405 struct Scsi_Host *host = device->host;
406 adpt_hba* pHba;
407
408 pHba = (adpt_hba *) host->hostdata[0];
409
410 if (host->can_queue && device->tagged_supported) {
411 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
412 host->can_queue - 1);
413 } else {
414 scsi_adjust_queue_depth(device, 0, 1);
415 }
416 return 0;
417}
418
419static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
420{
421 adpt_hba* pHba = NULL;
422 struct adpt_device* pDev = NULL; /* dpt per device information */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
424 cmd->scsi_done = done;
425 /*
426 * SCSI REQUEST_SENSE commands will be executed automatically by the
427 * Host Adapter for any errors, so they should not be executed
428 * explicitly unless the Sense Data is zero indicating that no error
429 * occurred.
430 */
431
432 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
433 cmd->result = (DID_OK << 16);
434 cmd->scsi_done(cmd);
435 return 0;
436 }
437
438 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
439 if (!pHba) {
440 return FAILED;
441 }
442
443 rmb();
444 /*
445 * TODO: I need to block here if I am processing ioctl cmds
446 * but if the outstanding cmds all finish before the ioctl,
447 * the scsi-core will not know to start sending cmds to me again.
 448 * I need a way to restart the scsi-core's queues, or I should block
 449 * calling scsi_done on the outstanding cmds instead.
 450 * For now we don't set the IOCTL state.
451 */
452 if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
453 pHba->host->last_reset = jiffies;
454 pHba->host->resetting = 1;
455 return 1;
456 }
457
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 // TODO if the cmd->device is offline then I may need to issue a bus rescan
459 // followed by a get_lct to see if the device is there anymore
460 if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
461 /*
462 * First command request for this device. Set up a pointer
463 * to the device structure. This should be a TEST_UNIT_READY
464 * command from scan_scsis_single.
465 */
466 if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
467 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
468 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
469 cmd->result = (DID_NO_CONNECT << 16);
470 cmd->scsi_done(cmd);
471 return 0;
472 }
473 cmd->device->hostdata = pDev;
474 }
475 pDev->pScsi_dev = cmd->device;
476
477 /*
478 * If we are being called from when the device is being reset,
479 * delay processing of the command until later.
480 */
481 if (pDev->state & DPTI_DEV_RESET ) {
482 return FAILED;
483 }
484 return adpt_scsi_to_i2o(pHba, cmd, pDev);
485}
486
487static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
488 sector_t capacity, int geom[])
489{
490 int heads=-1;
491 int sectors=-1;
492 int cylinders=-1;
493
494 // *** First lets set the default geometry ****
495
 496 // If the capacity is less than 0x2000
497 if (capacity < 0x2000 ) { // floppy
498 heads = 18;
499 sectors = 2;
500 }
501 // else if between 0x2000 and 0x20000
502 else if (capacity < 0x20000) {
503 heads = 64;
504 sectors = 32;
505 }
506 // else if between 0x20000 and 0x40000
507 else if (capacity < 0x40000) {
508 heads = 65;
509 sectors = 63;
510 }
 511 // else if between 0x40000 and 0x80000
512 else if (capacity < 0x80000) {
513 heads = 128;
514 sectors = 63;
515 }
516 // else if greater than 0x80000
517 else {
518 heads = 255;
519 sectors = 63;
520 }
521 cylinders = sector_div(capacity, heads * sectors);
522
523 // Special case if CDROM
524 if(sdev->type == 5) { // CDROM
525 heads = 252;
526 sectors = 63;
527 cylinders = 1111;
528 }
529
530 geom[0] = heads;
531 geom[1] = sectors;
532 geom[2] = cylinders;
533
534 PDEBUG("adpt_bios_param: exit\n");
535 return 0;
536}
537
538
539static const char *adpt_info(struct Scsi_Host *host)
540{
541 adpt_hba* pHba;
542
543 pHba = (adpt_hba *) host->hostdata[0];
544 return (char *) (pHba->detail);
545}
546
547static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
548 int length, int inout)
549{
550 struct adpt_device* d;
551 int id;
552 int chan;
553 int len = 0;
554 int begin = 0;
555 int pos = 0;
556 adpt_hba* pHba;
557 int unit;
558
559 *start = buffer;
560 if (inout == TRUE) {
561 /*
562 * The user has done a write and wants us to take the
563 * data in the buffer and do something with it.
564 * proc_scsiwrite calls us with inout = 1
565 *
566 * Read data from buffer (writing to us) - NOT SUPPORTED
567 */
568 return -EINVAL;
569 }
570
571 /*
572 * inout = 0 means the user has done a read and wants information
573 * returned, so we write information about the cards into the buffer
574 * proc_scsiread() calls us with inout = 0
575 */
576
577 // Find HBA (host bus adapter) we are looking for
Arjan van de Ven0b950672006-01-11 13:16:10 +0100578 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 for (pHba = hba_chain; pHba; pHba = pHba->next) {
580 if (pHba->host == host) {
581 break; /* found adapter */
582 }
583 }
Arjan van de Ven0b950672006-01-11 13:16:10 +0100584 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 if (pHba == NULL) {
586 return 0;
587 }
588 host = pHba->host;
589
590 len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
591 len += sprintf(buffer+len, "%s\n", pHba->detail);
592 len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
593 pHba->host->host_no, pHba->name, host->irq);
594 len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
595 host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
596
597 pos = begin + len;
598
599 /* CHECKPOINT */
600 if(pos > offset + length) {
601 goto stop_output;
602 }
603 if(pos <= offset) {
604 /*
605 * If we haven't even written to where we last left
606 * off (the last time we were called), reset the
607 * beginning pointer.
608 */
609 len = 0;
610 begin = pos;
611 }
612 len += sprintf(buffer+len, "Devices:\n");
613 for(chan = 0; chan < MAX_CHANNEL; chan++) {
614 for(id = 0; id < MAX_ID; id++) {
615 d = pHba->channel[chan].device[id];
616 while(d){
617 len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
618 len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
619 pos = begin + len;
620
621
622 /* CHECKPOINT */
623 if(pos > offset + length) {
624 goto stop_output;
625 }
626 if(pos <= offset) {
627 len = 0;
628 begin = pos;
629 }
630
631 unit = d->pI2o_dev->lct_data.tid;
632 len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
633 unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
634 scsi_device_online(d->pScsi_dev)? "online":"offline");
635 pos = begin + len;
636
637 /* CHECKPOINT */
638 if(pos > offset + length) {
639 goto stop_output;
640 }
641 if(pos <= offset) {
642 len = 0;
643 begin = pos;
644 }
645
646 d = d->next_lun;
647 }
648 }
649 }
650
651 /*
 652 * begin is where we last checked our position with regard to offset.
653 * begin is always less than offset. len is relative to begin. It
654 * is the number of bytes written past begin
655 *
656 */
657stop_output:
658 /* stop the output and calculate the correct length */
659 *(buffer + len) = '\0';
660
661 *start = buffer + (offset - begin); /* Start of wanted data */
662 len -= (offset - begin);
663 if(len > length) {
664 len = length;
665 } else if(len < 0){
666 len = 0;
667 **start = '\0';
668 }
669 return len;
670}
671
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200672/*
673 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
674 */
675static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
676{
677 return (u32)cmd->serial_number;
678}
679
680/*
681 * Go from a u32 'context' to a struct scsi_cmnd * .
682 * This could probably be made more efficient.
683 */
684static struct scsi_cmnd *
685 adpt_cmd_from_context(adpt_hba * pHba, u32 context)
686{
687 struct scsi_cmnd * cmd;
688 struct scsi_device * d;
689
690 if (context == 0)
691 return NULL;
692
693 spin_unlock(pHba->host->host_lock);
694 shost_for_each_device(d, pHba->host) {
695 unsigned long flags;
696 spin_lock_irqsave(&d->list_lock, flags);
697 list_for_each_entry(cmd, &d->cmd_list, list) {
698 if (((u32)cmd->serial_number == context)) {
699 spin_unlock_irqrestore(&d->list_lock, flags);
700 scsi_device_put(d);
701 spin_lock(pHba->host->host_lock);
702 return cmd;
703 }
704 }
705 spin_unlock_irqrestore(&d->list_lock, flags);
706 }
707 spin_lock(pHba->host->host_lock);
708
709 return NULL;
710}
711
712/*
713 * Turn a pointer to ioctl reply data into an u32 'context'
714 */
715static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
716{
717#if BITS_PER_LONG == 32
718 return (u32)(unsigned long)reply;
719#else
720 ulong flags = 0;
721 u32 nr, i;
722
723 spin_lock_irqsave(pHba->host->host_lock, flags);
724 nr = ARRAY_SIZE(pHba->ioctl_reply_context);
725 for (i = 0; i < nr; i++) {
726 if (pHba->ioctl_reply_context[i] == NULL) {
727 pHba->ioctl_reply_context[i] = reply;
728 break;
729 }
730 }
731 spin_unlock_irqrestore(pHba->host->host_lock, flags);
732 if (i >= nr) {
733 kfree (reply);
734 printk(KERN_WARNING"%s: Too many outstanding "
735 "ioctl commands\n", pHba->name);
736 return (u32)-1;
737 }
738
739 return i;
740#endif
741}
742
743/*
744 * Go from an u32 'context' to a pointer to ioctl reply data.
745 */
746static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
747{
748#if BITS_PER_LONG == 32
749 return (void *)(unsigned long)context;
750#else
751 void *p = pHba->ioctl_reply_context[context];
752 pHba->ioctl_reply_context[context] = NULL;
753
754 return p;
755#endif
756}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
758/*===========================================================================
759 * Error Handling routines
760 *===========================================================================
761 */
762
763static int adpt_abort(struct scsi_cmnd * cmd)
764{
765 adpt_hba* pHba = NULL; /* host bus adapter structure */
766 struct adpt_device* dptdevice; /* dpt per device information */
767 u32 msg[5];
768 int rcode;
769
770 if(cmd->serial_number == 0){
771 return FAILED;
772 }
773 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
774 printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
775 if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
776 printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
777 return FAILED;
778 }
779
780 memset(msg, 0, sizeof(msg));
781 msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
782 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
783 msg[2] = 0;
784 msg[3]= 0;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200785 msg[4] = adpt_cmd_to_context(cmd);
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800786 if (pHba->host)
787 spin_lock_irq(pHba->host->host_lock);
788 rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
789 if (pHba->host)
790 spin_unlock_irq(pHba->host->host_lock);
791 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 if(rcode == -EOPNOTSUPP ){
793 printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
794 return FAILED;
795 }
796 printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
797 return FAILED;
798 }
799 printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
800 return SUCCESS;
801}
802
803
804#define I2O_DEVICE_RESET 0x27
805// This is the same for BLK and SCSI devices
806// NOTE this is wrong in the i2o.h definitions
807// This is not currently supported by our adapter but we issue it anyway
808static int adpt_device_reset(struct scsi_cmnd* cmd)
809{
810 adpt_hba* pHba;
811 u32 msg[4];
812 u32 rcode;
813 int old_state;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -0700814 struct adpt_device* d = cmd->device->hostdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700815
816 pHba = (void*) cmd->device->host->hostdata[0];
817 printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
818 if (!d) {
819 printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
820 return FAILED;
821 }
822 memset(msg, 0, sizeof(msg));
823 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
824 msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
825 msg[2] = 0;
826 msg[3] = 0;
827
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800828 if (pHba->host)
829 spin_lock_irq(pHba->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 old_state = d->state;
831 d->state |= DPTI_DEV_RESET;
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800832 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
833 d->state = old_state;
834 if (pHba->host)
835 spin_unlock_irq(pHba->host->host_lock);
836 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 if(rcode == -EOPNOTSUPP ){
838 printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
839 return FAILED;
840 }
841 printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
842 return FAILED;
843 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
845 return SUCCESS;
846 }
847}
848
849
850#define I2O_HBA_BUS_RESET 0x87
851// This version of bus reset is called by the eh_error handler
852static int adpt_bus_reset(struct scsi_cmnd* cmd)
853{
854 adpt_hba* pHba;
855 u32 msg[4];
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800856 u32 rcode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857
858 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
859 memset(msg, 0, sizeof(msg));
860 printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
861 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
862 msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
863 msg[2] = 0;
864 msg[3] = 0;
Salyzyn, Marke5508c12005-12-17 19:26:30 -0800865 if (pHba->host)
866 spin_lock_irq(pHba->host->host_lock);
867 rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
868 if (pHba->host)
869 spin_unlock_irq(pHba->host->host_lock);
870 if (rcode != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
872 return FAILED;
873 } else {
874 printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
875 return SUCCESS;
876 }
877}
878
879// This version of reset is called by the eh_error_handler
Jeff Garzik df0ae242005-05-28 07:57:14 -0400880static int __adpt_reset(struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881{
882 adpt_hba* pHba;
883 int rcode;
884 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
885 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
886 rcode = adpt_hba_reset(pHba);
887 if(rcode == 0){
888 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
889 return SUCCESS;
890 } else {
891 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
892 return FAILED;
893 }
894}
895
Jeff Garzik df0ae242005-05-28 07:57:14 -0400896static int adpt_reset(struct scsi_cmnd* cmd)
897{
898 int rc;
899
900 spin_lock_irq(cmd->device->host->host_lock);
901 rc = __adpt_reset(cmd);
902 spin_unlock_irq(cmd->device->host->host_lock);
903
904 return rc;
905}
906
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
908static int adpt_hba_reset(adpt_hba* pHba)
909{
910 int rcode;
911
912 pHba->state |= DPTI_STATE_RESET;
913
 914 // Activate does get status, init outbound, and get hrt
915 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
916 printk(KERN_ERR "%s: Could not activate\n", pHba->name);
917 adpt_i2o_delete_hba(pHba);
918 return rcode;
919 }
920
921 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
922 adpt_i2o_delete_hba(pHba);
923 return rcode;
924 }
925 PDEBUG("%s: in HOLD state\n",pHba->name);
926
927 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
928 adpt_i2o_delete_hba(pHba);
929 return rcode;
930 }
931 PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
932
933 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
934 adpt_i2o_delete_hba(pHba);
935 return rcode;
936 }
937
938 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
939 adpt_i2o_delete_hba(pHba);
940 return rcode;
941 }
942 pHba->state &= ~DPTI_STATE_RESET;
943
944 adpt_fail_posted_scbs(pHba);
945 return 0; /* return success */
946}
947
948/*===========================================================================
949 *
950 *===========================================================================
951 */
952
953
954static void adpt_i2o_sys_shutdown(void)
955{
956 adpt_hba *pHba, *pNext;
Adrian Bunk458af542005-11-27 00:36:37 +0100957 struct adpt_i2o_post_wait_data *p1, *old;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958
959 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
960 printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
961 /* Delete all IOPs from the controller chain */
962 /* They should have already been released by the
963 * scsi-core
964 */
965 for (pHba = hba_chain; pHba; pHba = pNext) {
966 pNext = pHba->next;
967 adpt_i2o_delete_hba(pHba);
968 }
969
970 /* Remove any timedout entries from the wait queue. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971// spin_lock_irqsave(&adpt_post_wait_lock, flags);
972 /* Nothing should be outstanding at this point so just
973 * free them
974 */
Adrian Bunk458af542005-11-27 00:36:37 +0100975 for(p1 = adpt_post_wait_queue; p1;) {
976 old = p1;
977 p1 = p1->next;
978 kfree(old);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 }
980// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
981 adpt_post_wait_queue = NULL;
982
983 printk(KERN_INFO "Adaptec I2O controllers down.\n");
984}
985
Andrew Morton24601bb2007-12-10 15:49:20 -0800986static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987{
988
989 adpt_hba* pHba = NULL;
990 adpt_hba* p = NULL;
991 ulong base_addr0_phys = 0;
992 ulong base_addr1_phys = 0;
993 u32 hba_map0_area_size = 0;
994 u32 hba_map1_area_size = 0;
995 void __iomem *base_addr_virt = NULL;
996 void __iomem *msg_addr_virt = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +0200997 int dma64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998
999 int raptorFlag = FALSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000
1001 if(pci_enable_device(pDev)) {
1002 return -EINVAL;
1003 }
Salyzyn, Mark9638d892006-01-12 08:31:57 -05001004
1005 if (pci_request_regions(pDev, "dpt_i2o")) {
1006 PERROR("dpti: adpt_config_hba: pci request region failed\n");
1007 return -EINVAL;
1008 }
1009
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 pci_set_master(pDev);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001011
1012 /*
1013 * See if we should enable dma64 mode.
1014 */
1015 if (sizeof(dma_addr_t) > 4 &&
1016 pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
1017 if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
1018 dma64 = 1;
1019 }
1020 if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 return -EINVAL;
1022
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001023 /* adapter only supports message blocks below 4GB */
1024 pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);
1025
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026 base_addr0_phys = pci_resource_start(pDev,0);
1027 hba_map0_area_size = pci_resource_len(pDev,0);
1028
1029 // Check if standard PCI card or single BAR Raptor
1030 if(pDev->device == PCI_DPT_DEVICE_ID){
1031 if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
1032 // Raptor card with this device id needs 4M
1033 hba_map0_area_size = 0x400000;
1034 } else { // Not Raptor - it is a PCI card
1035 if(hba_map0_area_size > 0x100000 ){
1036 hba_map0_area_size = 0x100000;
1037 }
1038 }
1039 } else {// Raptor split BAR config
1040 // Use BAR1 in this configuration
1041 base_addr1_phys = pci_resource_start(pDev,1);
1042 hba_map1_area_size = pci_resource_len(pDev,1);
1043 raptorFlag = TRUE;
1044 }
1045
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001046#if BITS_PER_LONG == 64
1047 /*
1048 * The original Adaptec 64 bit driver has this comment here:
1049 * "x86_64 machines need more optimal mappings"
1050 *
1051 * I assume some HBAs report ridiculously large mappings
1052 * and we need to limit them on platforms with IOMMUs.
1053 */
1054 if (raptorFlag == TRUE) {
1055 if (hba_map0_area_size > 128)
1056 hba_map0_area_size = 128;
1057 if (hba_map1_area_size > 524288)
1058 hba_map1_area_size = 524288;
1059 } else {
1060 if (hba_map0_area_size > 524288)
1061 hba_map0_area_size = 524288;
1062 }
1063#endif
1064
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
1066 if (!base_addr_virt) {
James Bottomley9c472dd2005-08-08 11:51:38 -05001067 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 PERROR("dpti: adpt_config_hba: io remap failed\n");
1069 return -EINVAL;
1070 }
1071
1072 if(raptorFlag == TRUE) {
1073 msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
1074 if (!msg_addr_virt) {
1075 PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
1076 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001077 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 return -EINVAL;
1079 }
1080 } else {
1081 msg_addr_virt = base_addr_virt;
1082 }
1083
1084 // Allocate and zero the data structure
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02001085 pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1086 if (!pHba) {
1087 if (msg_addr_virt != base_addr_virt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 iounmap(msg_addr_virt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 iounmap(base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001090 pci_release_regions(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 return -ENOMEM;
1092 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093
Arjan van de Ven0b950672006-01-11 13:16:10 +01001094 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095
1096 if(hba_chain != NULL){
1097 for(p = hba_chain; p->next; p = p->next);
1098 p->next = pHba;
1099 } else {
1100 hba_chain = pHba;
1101 }
1102 pHba->next = NULL;
1103 pHba->unit = hba_count;
Benoit Boissinot 23a2bc22005-04-25 19:46:30 -07001104 sprintf(pHba->name, "dpti%d", hba_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 hba_count++;
1106
Arjan van de Ven0b950672006-01-11 13:16:10 +01001107 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
1109 pHba->pDev = pDev;
1110 pHba->base_addr_phys = base_addr0_phys;
1111
1112 // Set up the Virtual Base Address of the I2O Device
1113 pHba->base_addr_virt = base_addr_virt;
1114 pHba->msg_addr_virt = msg_addr_virt;
1115 pHba->irq_mask = base_addr_virt+0x30;
1116 pHba->post_port = base_addr_virt+0x40;
1117 pHba->reply_port = base_addr_virt+0x44;
1118
1119 pHba->hrt = NULL;
1120 pHba->lct = NULL;
1121 pHba->lct_size = 0;
1122 pHba->status_block = NULL;
1123 pHba->post_count = 0;
1124 pHba->state = DPTI_STATE_RESET;
1125 pHba->pDev = pDev;
1126 pHba->devices = NULL;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001127 pHba->dma64 = dma64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128
1129 // Initializing the spinlocks
1130 spin_lock_init(&pHba->state_lock);
1131 spin_lock_init(&adpt_post_wait_lock);
1132
1133 if(raptorFlag == 0){
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001134 printk(KERN_INFO "Adaptec I2O RAID controller"
1135 " %d at %p size=%x irq=%d%s\n",
1136 hba_count-1, base_addr_virt,
1137 hba_map0_area_size, pDev->irq,
1138 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 } else {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001140 printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1141 hba_count-1, pDev->irq,
1142 dma64 ? " (64-bit DMA)" : "");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1144 printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1145 }
1146
Thomas Gleixner1d6f3592006-07-01 19:29:42 -07001147 if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1149 adpt_i2o_delete_hba(pHba);
1150 return -EINVAL;
1151 }
1152
1153 return 0;
1154}
1155
1156
1157static void adpt_i2o_delete_hba(adpt_hba* pHba)
1158{
1159 adpt_hba* p1;
1160 adpt_hba* p2;
1161 struct i2o_device* d;
1162 struct i2o_device* next;
1163 int i;
1164 int j;
1165 struct adpt_device* pDev;
1166 struct adpt_device* pNext;
1167
1168
Arjan van de Ven0b950672006-01-11 13:16:10 +01001169 mutex_lock(&adpt_configuration_lock);
Andrew Morton24601bb2007-12-10 15:49:20 -08001170 // scsi_unregister calls our adpt_release which
 1171 // does a quiesce
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 if(pHba->host){
1173 free_irq(pHba->host->irq, pHba);
1174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 p2 = NULL;
1176 for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1177 if(p1 == pHba) {
1178 if(p2) {
1179 p2->next = p1->next;
1180 } else {
1181 hba_chain = p1->next;
1182 }
1183 break;
1184 }
1185 }
1186
1187 hba_count--;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001188 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189
1190 iounmap(pHba->base_addr_virt);
James Bottomley9c472dd2005-08-08 11:51:38 -05001191 pci_release_regions(pHba->pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 if(pHba->msg_addr_virt != pHba->base_addr_virt){
1193 iounmap(pHba->msg_addr_virt);
1194 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001195 if(pHba->FwDebugBuffer_P)
1196 iounmap(pHba->FwDebugBuffer_P);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001197 if(pHba->hrt) {
1198 dma_free_coherent(&pHba->pDev->dev,
1199 pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1200 pHba->hrt, pHba->hrt_pa);
1201 }
1202 if(pHba->lct) {
1203 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1204 pHba->lct, pHba->lct_pa);
1205 }
1206 if(pHba->status_block) {
1207 dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1208 pHba->status_block, pHba->status_block_pa);
1209 }
1210 if(pHba->reply_pool) {
1211 dma_free_coherent(&pHba->pDev->dev,
1212 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1213 pHba->reply_pool, pHba->reply_pool_pa);
1214 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216 for(d = pHba->devices; d ; d = next){
1217 next = d->next;
1218 kfree(d);
1219 }
1220 for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1221 for(j = 0; j < MAX_ID; j++){
1222 if(pHba->channel[i].device[j] != NULL){
1223 for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1224 pNext = pDev->next_lun;
1225 kfree(pDev);
1226 }
1227 }
1228 }
1229 }
Alan Coxa07f3532006-09-15 15:34:32 +01001230 pci_dev_put(pHba->pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 kfree(pHba);
1232
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +02001233 if (adpt_sysfs_class)
1234 device_destroy(adpt_sysfs_class,
1235 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1236
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 if(hba_count <= 0){
1238 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +02001239 if (adpt_sysfs_class) {
1240 class_destroy(adpt_sysfs_class);
1241 adpt_sysfs_class = NULL;
1242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 }
1244}
1245
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1247{
1248 struct adpt_device* d;
1249
1250 if(chan < 0 || chan >= MAX_CHANNEL)
1251 return NULL;
1252
1253 if( pHba->channel[chan].device == NULL){
1254 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1255 return NULL;
1256 }
1257
1258 d = pHba->channel[chan].device[id];
1259 if(!d || d->tid == 0) {
1260 return NULL;
1261 }
1262
1263 /* If it is the only lun at that address then this should match*/
1264 if(d->scsi_lun == lun){
1265 return d;
1266 }
1267
1268 /* else we need to look through all the luns */
1269 for(d=d->next_lun ; d ; d = d->next_lun){
1270 if(d->scsi_lun == lun){
1271 return d;
1272 }
1273 }
1274 return NULL;
1275}
1276
1277
1278static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1279{
1280 // I used my own version of the WAIT_QUEUE_HEAD
1281 // to handle some version differences
1282 // When embedded in the kernel this could go back to the vanilla one
1283 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1284 int status = 0;
1285 ulong flags = 0;
1286 struct adpt_i2o_post_wait_data *p1, *p2;
1287 struct adpt_i2o_post_wait_data *wait_data =
1288 kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
Andrew Morton4452ea52005-06-23 00:10:26 -07001289 DECLARE_WAITQUEUE(wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
Andrew Morton4452ea52005-06-23 00:10:26 -07001291 if (!wait_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 return -ENOMEM;
Andrew Morton4452ea52005-06-23 00:10:26 -07001293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 /*
1295 * The spin locking is needed to keep anyone from playing
1296 * with the queue pointers and id while we do the same
1297 */
1298 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1299 // TODO we need a MORE unique way of getting ids
1300 // to support async LCT get
1301 wait_data->next = adpt_post_wait_queue;
1302 adpt_post_wait_queue = wait_data;
1303 adpt_post_wait_id++;
1304 adpt_post_wait_id &= 0x7fff;
1305 wait_data->id = adpt_post_wait_id;
1306 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1307
1308 wait_data->wq = &adpt_wq_i2o_post;
1309 wait_data->status = -ETIMEDOUT;
1310
Andrew Morton4452ea52005-06-23 00:10:26 -07001311 add_wait_queue(&adpt_wq_i2o_post, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
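	/*
	 * Tag the initiator context: the top bit appears to mark this as a
	 * post/wait request, and the low 15 bits carry wait_data->id, which
	 * adpt_i2o_post_wait_complete() masks out and matches to wake us up.
	 */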
1313 msg[2] |= 0x80000000 | ((u32)wait_data->id);
1314 timeout *= HZ;
1315 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1316 set_current_state(TASK_INTERRUPTIBLE);
1317 if(pHba->host)
1318 spin_unlock_irq(pHba->host->host_lock);
1319 if (!timeout)
1320 schedule();
1321 else{
1322 timeout = schedule_timeout(timeout);
1323 if (timeout == 0) {
1324 // I/O issued, but cannot get result in
 1325 // specified time. Freeing resources is
1326 // dangerous.
1327 status = -ETIME;
1328 }
1329 }
1330 if(pHba->host)
1331 spin_lock_irq(pHba->host->host_lock);
1332 }
Andrew Morton4452ea52005-06-23 00:10:26 -07001333 remove_wait_queue(&adpt_wq_i2o_post, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
1335 if(status == -ETIMEDOUT){
1336 printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1337 // We will have to free the wait_data memory during shutdown
1338 return status;
1339 }
1340
1341 /* Remove the entry from the queue. */
1342 p2 = NULL;
1343 spin_lock_irqsave(&adpt_post_wait_lock, flags);
1344 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1345 if(p1 == wait_data) {
1346 if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1347 status = -EOPNOTSUPP;
1348 }
1349 if(p2) {
1350 p2->next = p1->next;
1351 } else {
1352 adpt_post_wait_queue = p1->next;
1353 }
1354 break;
1355 }
1356 }
1357 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1358
1359 kfree(wait_data);
1360
1361 return status;
1362}
1363
1364
1365static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1366{
1367
1368 u32 m = EMPTY_QUEUE;
1369 u32 __iomem *msg;
1370 ulong timeout = jiffies + 30*HZ;
1371 do {
1372 rmb();
1373 m = readl(pHba->post_port);
1374 if (m != EMPTY_QUEUE) {
1375 break;
1376 }
1377 if(time_after(jiffies,timeout)){
1378 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1379 return -ETIMEDOUT;
1380 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001381 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 } while(m == EMPTY_QUEUE);
1383
1384 msg = pHba->msg_addr_virt + m;
1385 memcpy_toio(msg, data, len);
1386 wmb();
1387
1388 //post message
1389 writel(m, pHba->post_port);
1390 wmb();
1391
1392 return 0;
1393}
1394
1395
1396static void adpt_i2o_post_wait_complete(u32 context, int status)
1397{
1398 struct adpt_i2o_post_wait_data *p1 = NULL;
1399 /*
1400 * We need to search through the adpt_post_wait
1401 * queue to see if the given message is still
1402 * outstanding. If not, it means that the IOP
1403 * took longer to respond to the message than we
1404 * had allowed and timer has already expired.
1405 * Not much we can do about that except log
1406 * it for debug purposes, increase timeout, and recompile
1407 *
1408 * Lock needed to keep anyone from moving queue pointers
1409 * around while we're looking through them.
1410 */
1411
1412 context &= 0x7fff;
1413
1414 spin_lock(&adpt_post_wait_lock);
1415 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1416 if(p1->id == context) {
1417 p1->status = status;
1418 spin_unlock(&adpt_post_wait_lock);
1419 wake_up_interruptible(p1->wq);
1420 return;
1421 }
1422 }
1423 spin_unlock(&adpt_post_wait_lock);
1424 // If this happens we lose commands that probably really completed
1425 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1426 printk(KERN_DEBUG" Tasks in wait queue:\n");
1427 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1428 printk(KERN_DEBUG" %d\n",p1->id);
1429 }
1430 return;
1431}
1432
1433static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1434{
1435 u32 msg[8];
1436 u8* status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001437 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 u32 m = EMPTY_QUEUE ;
1439 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1440
1441 if(pHba->initialized == FALSE) { // First time reset should be quick
1442 timeout = jiffies + (25*HZ);
1443 } else {
1444 adpt_i2o_quiesce_hba(pHba);
1445 }
1446
1447 do {
1448 rmb();
1449 m = readl(pHba->post_port);
1450 if (m != EMPTY_QUEUE) {
1451 break;
1452 }
1453 if(time_after(jiffies,timeout)){
1454 printk(KERN_WARNING"Timeout waiting for message!\n");
1455 return -ETIMEDOUT;
1456 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001457 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 } while (m == EMPTY_QUEUE);
1459
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001460 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 if(status == NULL) {
1462 adpt_send_nop(pHba, m);
1463 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1464 return -ENOMEM;
1465 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001466 memset(status,0,4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
1468 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1469 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1470 msg[2]=0;
1471 msg[3]=0;
1472 msg[4]=0;
1473 msg[5]=0;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001474 msg[6]=dma_low(addr);
1475 msg[7]=dma_high(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
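	/*
	 * msg[6..7] hand the IOP a DMA address where it writes a one-byte
	 * reset status, polled below: 0x01 means reset in progress and 0x02
	 * means the reset was rejected (values taken from the checks below).
	 */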
1477 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1478 wmb();
1479 writel(m, pHba->post_port);
1480 wmb();
1481
1482 while(*status == 0){
1483 if(time_after(jiffies,timeout)){
1484 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001485 /* We lose 4 bytes of "status" here, but we cannot
 1486 free these because the controller may wake up and corrupt
1487 those bytes at any time */
1488 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 return -ETIMEDOUT;
1490 }
1491 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001492 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 }
1494
1495 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1496 PDEBUG("%s: Reset in progress...\n", pHba->name);
 1497 // Here we wait for a message frame to become available,
 1498 // indicating that the reset has finished
1499 do {
1500 rmb();
1501 m = readl(pHba->post_port);
1502 if (m != EMPTY_QUEUE) {
1503 break;
1504 }
1505 if(time_after(jiffies,timeout)){
1506 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001507 /* We lose 4 bytes of "status" here, but we
 1508 cannot free these because the controller may
 1509 wake up and corrupt those bytes at any time */
1510 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 return -ETIMEDOUT;
1512 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001513 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 } while (m == EMPTY_QUEUE);
1515 // Flush the offset
1516 adpt_send_nop(pHba, m);
1517 }
1518 adpt_i2o_status_get(pHba);
1519 if(*status == 0x02 ||
1520 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1521 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1522 pHba->name);
1523 } else {
1524 PDEBUG("%s: Reset completed.\n", pHba->name);
1525 }
1526
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001527 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528#ifdef UARTDELAY
1529 // This delay is to allow someone attached to the card through the debug UART to
1530 // set up the dump levels that they want before the rest of the initialization sequence
1531 adpt_delay(20000);
1532#endif
1533 return 0;
1534}
1535
1536
1537static int adpt_i2o_parse_lct(adpt_hba* pHba)
1538{
1539 int i;
1540 int max;
1541 int tid;
1542 struct i2o_device *d;
1543 i2o_lct *lct = pHba->lct;
1544 u8 bus_no = 0;
1545 s16 scsi_id;
1546 s16 scsi_lun;
1547 u32 buf[10]; // larger than 7, or 8 ...
1548 struct adpt_device* pDev;
1549
1550 if (lct == NULL) {
1551 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1552 return -1;
1553 }
1554
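	/*
	 * The LCT is laid out as a 3-dword header followed by 9-dword entries,
	 * so the three lines below yield the number of entries (layout assumed
	 * from the i2o_lct definition).
	 */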
1555 max = lct->table_size;
1556 max -= 3;
1557 max /= 9;
1558
1559 for(i=0;i<max;i++) {
1560 if( lct->lct_entry[i].user_tid != 0xfff){
1561 /*
1562 * If we have hidden devices, we need to inform the upper layers about
1563 * the possible maximum id reference to handle device access when
1564 * an array is disassembled. This code has no other purpose but to
1565 * allow us future access to devices that are currently hidden
1566 * behind arrays, hotspares or have not been configured (JBOD mode).
1567 */
1568 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1569 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1570 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1571 continue;
1572 }
1573 tid = lct->lct_entry[i].tid;
1574 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1575 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1576 continue;
1577 }
1578 bus_no = buf[0]>>16;
1579 scsi_id = buf[1];
1580 scsi_lun = (buf[2]>>8 )&0xff;
1581 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1582 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1583 continue;
1584 }
1585 if (scsi_id >= MAX_ID){
1586 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1587 continue;
1588 }
1589 if(bus_no > pHba->top_scsi_channel){
1590 pHba->top_scsi_channel = bus_no;
1591 }
1592 if(scsi_id > pHba->top_scsi_id){
1593 pHba->top_scsi_id = scsi_id;
1594 }
1595 if(scsi_lun > pHba->top_scsi_lun){
1596 pHba->top_scsi_lun = scsi_lun;
1597 }
1598 continue;
1599 }
Robert P. J. Day5cbded52006-12-13 00:35:56 -08001600 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 if(d==NULL)
1602 {
1603 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1604 return -ENOMEM;
1605 }
1606
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001607 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 d->next = NULL;
1609
1610 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1611
1612 d->flags = 0;
1613 tid = d->lct_data.tid;
1614 adpt_i2o_report_hba_unit(pHba, d);
1615 adpt_i2o_install_device(pHba, d);
1616 }
1617 bus_no = 0;
1618 for(d = pHba->devices; d ; d = d->next) {
1619 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1620 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1621 tid = d->lct_data.tid;
1622 // TODO get the bus_no from hrt-but for now they are in order
1623 //bus_no =
1624 if(bus_no > pHba->top_scsi_channel){
1625 pHba->top_scsi_channel = bus_no;
1626 }
1627 pHba->channel[bus_no].type = d->lct_data.class_id;
1628 pHba->channel[bus_no].tid = tid;
1629 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1630 {
1631 pHba->channel[bus_no].scsi_id = buf[1];
1632 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1633 }
1634 // TODO remove - this is just until we get from hrt
1635 bus_no++;
1636 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1637 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1638 break;
1639 }
1640 }
1641 }
1642
1643 // Setup adpt_device table
1644 for(d = pHba->devices; d ; d = d->next) {
1645 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1646 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1647 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1648
1649 tid = d->lct_data.tid;
1650 scsi_id = -1;
1651 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1652 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1653 bus_no = buf[0]>>16;
1654 scsi_id = buf[1];
1655 scsi_lun = (buf[2]>>8 )&0xff;
1656 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1657 continue;
1658 }
1659 if (scsi_id >= MAX_ID) {
1660 continue;
1661 }
1662 if( pHba->channel[bus_no].device[scsi_id] == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301663 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 if(pDev == NULL) {
1665 return -ENOMEM;
1666 }
1667 pHba->channel[bus_no].device[scsi_id] = pDev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 } else {
1669 for( pDev = pHba->channel[bus_no].device[scsi_id];
1670 pDev->next_lun; pDev = pDev->next_lun){
1671 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301672 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 if(pDev->next_lun == NULL) {
1674 return -ENOMEM;
1675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 pDev = pDev->next_lun;
1677 }
1678 pDev->tid = tid;
1679 pDev->scsi_channel = bus_no;
1680 pDev->scsi_id = scsi_id;
1681 pDev->scsi_lun = scsi_lun;
1682 pDev->pI2o_dev = d;
1683 d->owner = pDev;
1684 pDev->type = (buf[0])&0xff;
1685 pDev->flags = (buf[0]>>8)&0xff;
1686 if(scsi_id > pHba->top_scsi_id){
1687 pHba->top_scsi_id = scsi_id;
1688 }
1689 if(scsi_lun > pHba->top_scsi_lun){
1690 pHba->top_scsi_lun = scsi_lun;
1691 }
1692 }
1693 if(scsi_id == -1){
1694 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1695 d->lct_data.identity_tag);
1696 }
1697 }
1698 }
1699 return 0;
1700}
1701
1702
1703/*
1704 * Each I2O controller has a chain of devices on it - these match
1705 * the useful parts of the LCT of the board.
1706 */
1707
1708static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1709{
Arjan van de Ven0b950672006-01-11 13:16:10 +01001710 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 d->controller=pHba;
1712 d->owner=NULL;
1713 d->next=pHba->devices;
1714 d->prev=NULL;
1715 if (pHba->devices != NULL){
1716 pHba->devices->prev=d;
1717 }
1718 pHba->devices=d;
1719 *d->dev_name = 0;
1720
Arjan van de Ven0b950672006-01-11 13:16:10 +01001721 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 return 0;
1723}
1724
1725static int adpt_open(struct inode *inode, struct file *file)
1726{
1727 int minor;
1728 adpt_hba* pHba;
1729
1730 //TODO check for root access
1731 //
1732 minor = iminor(inode);
1733 if (minor >= hba_count) {
1734 return -ENXIO;
1735 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001736 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1738 if (pHba->unit == minor) {
1739 break; /* found adapter */
1740 }
1741 }
1742 if (pHba == NULL) {
Arjan van de Ven0b950672006-01-11 13:16:10 +01001743 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 return -ENXIO;
1745 }
1746
1747// if(pHba->in_use){
Arjan van de Ven0b950672006-01-11 13:16:10 +01001748 // mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749// return -EBUSY;
1750// }
1751
1752 pHba->in_use = 1;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001753 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
1755 return 0;
1756}
1757
1758static int adpt_close(struct inode *inode, struct file *file)
1759{
1760 int minor;
1761 adpt_hba* pHba;
1762
1763 minor = iminor(inode);
1764 if (minor >= hba_count) {
1765 return -ENXIO;
1766 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001767 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1769 if (pHba->unit == minor) {
1770 break; /* found adapter */
1771 }
1772 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001773 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 if (pHba == NULL) {
1775 return -ENXIO;
1776 }
1777
1778 pHba->in_use = 0;
1779
1780 return 0;
1781}
1782
1783
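/*
 * I2OUSRCMD pass-through (flow of the function below): the caller's I2O
 * frame is copied in from user space (its size sits in the top 16 bits of
 * word 0, counted in 32-bit words).  Every simple SG element in the frame
 * gets a DMA-coherent bounce buffer; user data is copied in for outbound
 * elements and the element is rewritten to point at the bounce buffer.
 * The frame is then posted and, after completion, inbound SG buffers and
 * the reply frame are copied back to user space.  The 0x40000000 bit put
 * into msg[2] tags the reply as an ioctl completion so the ISR copies it
 * into the reply buffer allocated here.
 */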
1784static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1785{
1786 u32 msg[MAX_MESSAGE_SIZE];
1787 u32* reply = NULL;
1788 u32 size = 0;
1789 u32 reply_size = 0;
1790 u32 __user *user_msg = arg;
1791 u32 __user * user_reply = NULL;
1792 void *sg_list[pHba->sg_tablesize];
1793 u32 sg_offset = 0;
1794 u32 sg_count = 0;
1795 int sg_index = 0;
1796 u32 i = 0;
1797 u32 rcode = 0;
1798 void *p = NULL;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001799 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 ulong flags = 0;
1801
1802 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1803 // get user msg size in u32s
1804 if(get_user(size, &user_msg[0])){
1805 return -EFAULT;
1806 }
1807 size = size>>16;
1808
1809 user_reply = &user_msg[size];
1810 if(size > MAX_MESSAGE_SIZE){
1811 return -EFAULT;
1812 }
1813 size *= 4; // Convert to bytes
1814
1815 /* Copy in the user's I2O command */
1816 if(copy_from_user(msg, user_msg, size)) {
1817 return -EFAULT;
1818 }
1819 get_user(reply_size, &user_reply[0]);
1820 reply_size = reply_size>>16;
1821 if(reply_size > REPLY_FRAME_SIZE){
1822 reply_size = REPLY_FRAME_SIZE;
1823 }
1824 reply_size *= 4;
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301825 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 if(reply == NULL) {
1827 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1828 return -ENOMEM;
1829 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 sg_offset = (msg[0]>>4)&0xf;
1831 msg[2] = 0x40000000; // IOCTL context
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001832 msg[3] = adpt_ioctl_to_context(pHba, reply);
if (msg[3] == (u32)-1) {
	kfree(reply);	/* don't leak the reply buffer on this error path */
	return -EBUSY;
}
1835
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
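/*
 * Sketch of the simple SG element the loop below expects (field meanings
 * inferred from the masks used in this driver):
 *
 *   struct sg_simple_element {
 *       u32 flag_count;  // bits 0..23: byte count
 *                        // 0x10000000: simple address element
 *                        // 0x04000000: direction flag (data to controller)
 *       u32 addr_bus;    // 32-bit bus address of the data buffer
 *   };
 */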
1837 if(sg_offset) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001838 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1840 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1841 if (sg_count > pHba->sg_tablesize){
1842 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1843 kfree (reply);
1844 return -EINVAL;
1845 }
1846
1847 for(i = 0; i < sg_count; i++) {
1848 int sg_size;
1849
1850 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1851 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1852 rcode = -EINVAL;
1853 goto cleanup;
1854 }
1855 sg_size = sg[i].flag_count & 0xffffff;
1856 /* Allocate memory for the transfer */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001857 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 if(!p) {
1859 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1860 pHba->name,sg_size,i,sg_count);
1861 rcode = -ENOMEM;
1862 goto cleanup;
1863 }
1864 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1865 /* Copy in the user's SG buffer if necessary */
1866 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001867 // sg_simple_element API is 32 bit
1868 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1870 rcode = -EFAULT;
1871 goto cleanup;
1872 }
1873 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001874 /* sg_simple_element API is 32 bit, but addr < 4GB */
1875 sg[i].addr_bus = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 }
1877 }
1878
1879 do {
1880 if(pHba->host)
1881 spin_lock_irqsave(pHba->host->host_lock, flags);
1882 // This state stops any new commands from entering the
1883 // controller while processing the ioctl
1884// pHba->state |= DPTI_STATE_IOCTL;
1885// We can't set this now - The scsi subsystem sets host_blocked and
1886// the queue empties and stops. We need a way to restart the queue
1887 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1888 if (rcode != 0)
1889 printk(KERN_WARNING"adpt_i2o_passthru: post wait failed %d %p\n",
1890 rcode, reply);
1891// pHba->state &= ~DPTI_STATE_IOCTL;
1892 if(pHba->host)
1893 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1894 } while(rcode == -ETIMEDOUT);
1895
1896 if(rcode){
1897 goto cleanup;
1898 }
1899
1900 if(sg_offset) {
1901 /* Copy back the Scatter Gather buffers back to user space */
1902 u32 j;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001903 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 struct sg_simple_element* sg;
1905 int sg_size;
1906
1907 // re-acquire the original message to handle correctly the sg copy operation
1908 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1909 // get user msg size in u32s
1910 if(get_user(size, &user_msg[0])){
1911 rcode = -EFAULT;
1912 goto cleanup;
1913 }
1914 size = size>>16;
1915 size *= 4;
1916 /* Copy in the user's I2O command */
1917 if (copy_from_user (msg, user_msg, size)) {
1918 rcode = -EFAULT;
1919 goto cleanup;
1920 }
1921 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1922
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001923 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 sg = (struct sg_simple_element*)(msg + sg_offset);
1925 for (j = 0; j < sg_count; j++) {
1926 /* Copy out the SG list to user's buffer if necessary */
1927 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1928 sg_size = sg[j].flag_count & 0xffffff;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001929 // sg_simple_element API is 32 bit
1930 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1932 rcode = -EFAULT;
1933 goto cleanup;
1934 }
1935 }
1936 }
1937 }
1938
1939 /* Copy back the reply to user space */
1940 if (reply_size) {
1941 // we wrote our own values for context - now restore the user supplied ones
1942 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1943 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1944 rcode = -EFAULT;
1945 }
1946 if(copy_to_user(user_reply, reply, reply_size)) {
1947 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1948 rcode = -EFAULT;
1949 }
1950 }
1951
1952
1953cleanup:
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001954 if (rcode != -ETIME && rcode != -EINTR) {
1955 struct sg_simple_element *sg =
1956 (struct sg_simple_element*) (msg +sg_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 kfree (reply);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001958 while(sg_index) {
1959 if(sg_list[--sg_index]) {
1960 dma_free_coherent(&pHba->pDev->dev,
1961 sg[sg_index].flag_count & 0xffffff,
1962 sg_list[sg_index],
1963 sg[sg_index].addr_bus);
1964 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 }
1966 }
1967 return rcode;
1968}
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970#if defined __ia64__
1971static void adpt_ia64_info(sysInfo_S* si)
1972{
1973 // This is all the info we need for now
1974 // We will add more info as our new
1975 // management utility requires it
1976 si->processorType = PROC_IA64;
1977}
1978#endif
1979
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980#if defined __sparc__
1981static void adpt_sparc_info(sysInfo_S* si)
1982{
1983 // This is all the info we need for now
1984 // We will add more info as our new
1985 // management utility requires it
1986 si->processorType = PROC_ULTRASPARC;
1987}
1988#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989#if defined __alpha__
1990static void adpt_alpha_info(sysInfo_S* si)
1991{
1992 // This is all the info we need for now
1993 // We will add more info as our new
1994 // management utility requires it
1995 si->processorType = PROC_ALPHA;
1996}
1997#endif
1998
1999#if defined __i386__
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000static void adpt_i386_info(sysInfo_S* si)
2001{
2002 // This is all the info we need for now
2003 // We will add more info as our new
2004 // management utility requires it
2005 switch (boot_cpu_data.x86) {
2006 case CPU_386:
2007 si->processorType = PROC_386;
2008 break;
2009 case CPU_486:
2010 si->processorType = PROC_486;
2011 break;
2012 case CPU_586:
2013 si->processorType = PROC_PENTIUM;
2014 break;
2015 default: // Just in case
2016 si->processorType = PROC_PENTIUM;
2017 break;
2018 }
2019}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020#endif
2021
Andrew Morton8b2cc912008-05-06 20:42:42 -07002022/*
2023 * This routine returns information about the system. This does not affect
2024 * any logic, and if the info is wrong it doesn't matter.
2025 */
2026
2027/* Get all the info we can not get from kernel services */
2028static int adpt_system_info(void __user *buffer)
2029{
2030 sysInfo_S si;
2031
2032 memset(&si, 0, sizeof(si));
2033
2034 si.osType = OS_LINUX;
2035 si.osMajorVersion = 0;
2036 si.osMinorVersion = 0;
2037 si.osRevision = 0;
2038 si.busType = SI_PCI_BUS;
2039 si.processorFamily = DPTI_sig.dsProcessorFamily;
2040
2041#if defined __i386__
2042 adpt_i386_info(&si);
2043#elif defined (__ia64__)
2044 adpt_ia64_info(&si);
2045#elif defined(__sparc__)
2046 adpt_sparc_info(&si);
2047#elif defined (__alpha__)
2048 adpt_alpha_info(&si);
2049#else
2050 si.processorType = 0xff ;
2051#endif
2052 if (copy_to_user(buffer, &si, sizeof(si))){
2053 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2054 return -EFAULT;
2055 }
2056
2057 return 0;
2058}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2061 ulong arg)
2062{
2063 int minor;
2064 int error = 0;
2065 adpt_hba* pHba;
2066 ulong flags = 0;
2067 void __user *argp = (void __user *)arg;
2068
2069 minor = iminor(inode);
2070 if (minor >= DPTI_MAX_HBA){
2071 return -ENXIO;
2072 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002073 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2075 if (pHba->unit == minor) {
2076 break; /* found adapter */
2077 }
2078 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002079 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 if(pHba == NULL){
2081 return -ENXIO;
2082 }
2083
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002084 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2085 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086
2087 switch (cmd) {
2088 // TODO: handle 3 cases
2089 case DPT_SIGNATURE:
2090 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2091 return -EFAULT;
2092 }
2093 break;
2094 case I2OUSRCMD:
2095 return adpt_i2o_passthru(pHba, argp);
2096
2097 case DPT_CTRLINFO:{
2098 drvrHBAinfo_S HbaInfo;
2099
2100#define FLG_OSD_PCI_VALID 0x0001
2101#define FLG_OSD_DMA 0x0002
2102#define FLG_OSD_I2O 0x0004
2103 memset(&HbaInfo, 0, sizeof(HbaInfo));
2104 HbaInfo.drvrHBAnum = pHba->unit;
2105 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2106 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2107 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2108 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2109 HbaInfo.Interrupt = pHba->pDev->irq;
2110 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2111 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2112 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2113 return -EFAULT;
2114 }
2115 break;
2116 }
2117 case DPT_SYSINFO:
2118 return adpt_system_info(argp);
2119 case DPT_BLINKLED:{
2120 u32 value;
2121 value = (u32)adpt_read_blink_led(pHba);
2122 if (copy_to_user(argp, &value, sizeof(value))) {
2123 return -EFAULT;
2124 }
2125 break;
2126 }
2127 case I2ORESETCMD:
2128 if(pHba->host)
2129 spin_lock_irqsave(pHba->host->host_lock, flags);
2130 adpt_hba_reset(pHba);
2131 if(pHba->host)
2132 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2133 break;
2134 case I2ORESCANCMD:
2135 adpt_rescan(pHba);
2136 break;
2137 default:
2138 return -EINVAL;
2139 }
2140
2141 return error;
2142}
2143
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002144#ifdef CONFIG_COMPAT
2145static long compat_adpt_ioctl(struct file *file,
2146 unsigned int cmd, unsigned long arg)
2147{
2148 struct inode *inode;
2149 long ret;
2150
2151 inode = file->f_dentry->d_inode;
2152
2153 lock_kernel();
2154
2155 switch(cmd) {
2156 case DPT_SIGNATURE:
2157 case I2OUSRCMD:
2158 case DPT_CTRLINFO:
2159 case DPT_SYSINFO:
2160 case DPT_BLINKLED:
2161 case I2ORESETCMD:
2162 case I2ORESCANCMD:
2163 case (DPT_TARGET_BUSY & 0xFFFF):
2164 case DPT_TARGET_BUSY:
2165 ret = adpt_ioctl(inode, file, cmd, arg);
2166 break;
2167 default:
2168 ret = -ENOIOCTLCMD;
2169 }
2170
2171 unlock_kernel();
2172
2173 return ret;
2174}
2175#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
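/*
 * Interrupt handler outline (as coded below): drain the reply FIFO while
 * the pending bit is set.  Each reply's initiator context selects its owner:
 *   0x40000000 - ioctl pass-through; copy the frame into the waiter's reply
 *                buffer looked up via adpt_ioctl_from_context()
 *   0x80000000 - post-wait message; extract the detail status and wake the
 *                waiter through adpt_i2o_post_wait_complete()
 *   otherwise  - a normal SCSI command; unmap its DMA and complete it via
 *                adpt_i2o_to_scsi()
 * For MSG_FAIL replies the original transaction context is recovered from
 * the preserved MFA and the failed frame is handed back with a NOP.
 */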
David Howells7d12e782006-10-05 14:55:46 +01002177static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
2179 struct scsi_cmnd* cmd;
2180 adpt_hba* pHba = dev_id;
2181 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002182 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 u32 status=0;
2184 u32 context;
2185 ulong flags = 0;
2186 int handled = 0;
2187
2188 if (pHba == NULL){
2189 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2190 return IRQ_NONE;
2191 }
2192 if(pHba->host)
2193 spin_lock_irqsave(pHba->host->host_lock, flags);
2194
2195 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2196 m = readl(pHba->reply_port);
2197 if(m == EMPTY_QUEUE){
2198 // Try twice then give up
2199 rmb();
2200 m = readl(pHba->reply_port);
2201 if(m == EMPTY_QUEUE){
2202 // This really should not happen
2203 printk(KERN_ERR"dpti: Could not get reply frame\n");
2204 goto out;
2205 }
2206 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002207 if (pHba->reply_pool_pa <= m &&
2208 m < pHba->reply_pool_pa +
2209 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2210 reply = (u8 *)pHba->reply_pool +
2211 (m - pHba->reply_pool_pa);
2212 } else {
2213 /* Ick, we should *never* be here */
2214 printk(KERN_ERR "dpti: reply frame not from pool\n");
2215 reply = (u8 *)bus_to_virt(m);
2216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
2218 if (readl(reply) & MSG_FAIL) {
2219 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002220 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 u32 old_context;
2222 PDEBUG("%s: Failed message\n",pHba->name);
2223 if(old_m >= 0x100000){
2224 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2225 writel(m,pHba->reply_port);
2226 continue;
2227 }
2228 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002229 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 old_context = readl(msg+12);
2231 writel(old_context, reply+12);
2232 adpt_send_nop(pHba, old_m);
2233 }
2234 context = readl(reply+8);
2235 if(context & 0x40000000){ // IOCTL
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002236 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002237 if( p != NULL) {
2238 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 }
2240 // All IOCTLs will also be post wait
2241 }
2242 if(context & 0x80000000){ // Post wait message
2243 status = readl(reply+16);
2244 if(status >> 24){
2245 status &= 0xffff; /* Get detail status */
2246 } else {
2247 status = I2O_POST_WAIT_OK;
2248 }
2249 if(!(context & 0x40000000)) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002250 cmd = adpt_cmd_from_context(pHba,
2251 readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 if(cmd != NULL) {
2253 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2254 }
2255 }
2256 adpt_i2o_post_wait_complete(context, status);
2257 } else { // SCSI message
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002258 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 if(cmd != NULL){
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002260 scsi_dma_unmap(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 if(cmd->serial_number != 0) { // If not timed out
2262 adpt_i2o_to_scsi(reply, cmd);
2263 }
2264 }
2265 }
2266 writel(m, pHba->reply_port);
2267 wmb();
2268 rmb();
2269 }
2270 handled = 1;
2271out: if(pHba->host)
2272 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2273 return IRQ_RETVAL(handled);
2274}
2275
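/*
 * Rough layout of the private SCSI_EXEC frame built below (word offsets
 * taken from the assignments that follow): msg[1] target TID, msg[3]
 * transaction context (the scsi_cmnd cookie), msg[4] I2O_CMD_SCSI_EXEC plus
 * the DPT organization ID, msg[6] SCB flags and CDB length, msg[7..10] the
 * CDB (16-byte block), msg[11] total data length, then the SG list.  On a
 * 64-bit capable controller a two-word "enable 64-bit SGL" element and the
 * page size are inserted ahead of the list and every SG entry carries low
 * and high address words.
 */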
2276static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2277{
2278 int i;
2279 u32 msg[MAX_MESSAGE_SIZE];
2280 u32* mptr;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002281 u32* lptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 u32 *lenptr;
2283 int direction;
2284 int scsidir;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002285 int nseg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 u32 len;
2287 u32 reqlen;
2288 s32 rcode;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002289 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
2291 memset(msg, 0 , sizeof(msg));
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002292 len = scsi_bufflen(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 direction = 0x00000000;
2294
2295 scsidir = 0x00000000; // DATA NO XFER
2296 if(len) {
2297 /*
2298 * Set SCBFlags to indicate if data is being transferred
2299 * in or out, or no data transfer
2300 * Note: Do not have to verify index is less than 0 since
2301 * cmd->cmnd[0] is an unsigned char
2302 */
2303 switch(cmd->sc_data_direction){
2304 case DMA_FROM_DEVICE:
2305 scsidir =0x40000000; // DATA IN (iop<--dev)
2306 break;
2307 case DMA_TO_DEVICE:
2308 direction=0x04000000; // SGL OUT
2309 scsidir =0x80000000; // DATA OUT (iop-->dev)
2310 break;
2311 case DMA_NONE:
2312 break;
2313 case DMA_BIDIRECTIONAL:
2314 scsidir =0x40000000; // DATA IN (iop<--dev)
2315 // Assume In - and continue;
2316 break;
2317 default:
2318 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2319 pHba->name, cmd->cmnd[0]);
2320 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2321 cmd->scsi_done(cmd);
2322 return 0;
2323 }
2324 }
2325 // msg[0] is set later
2326 // I2O_CMD_SCSI_EXEC
2327 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2328 msg[2] = 0;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002329 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 // Our cards use the transaction context as the tag for queueing
2331 // Adaptec/DPT Private stuff
2332 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2333 msg[5] = d->tid;
2334 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2335 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2336 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2337 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2338 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2339
2340 mptr=msg+7;
2341
2342 // Write SCSI command into the message - always 16 byte block
2343 memset(mptr, 0, 16);
2344 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2345 mptr+=4;
2346 lenptr=mptr++; /* Remember me - fill in when we know */
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002347 if (dpt_dma64(pHba)) {
2348 reqlen = 16; // SINGLE SGE
2349 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2350 *mptr++ = 1 << PAGE_SHIFT;
2351 } else {
2352 reqlen = 14; // SINGLE SGE
2353 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 /* Now fill in the SGList and command */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002356 nseg = scsi_dma_map(cmd);
2357 BUG_ON(nseg < 0);
2358 if (nseg) {
2359 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360
2361 len = 0;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002362 scsi_for_each_sg(cmd, sg, nseg, i) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002363 lptr = mptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2365 len+=sg_dma_len(sg);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002366 addr = sg_dma_address(sg);
2367 *mptr++ = dma_low(addr);
2368 if (dpt_dma64(pHba))
2369 *mptr++ = dma_high(addr);
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002370 /* Make this an end of list */
2371 if (i == nseg - 1)
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002372 *lptr = direction|0xD0000000|sg_dma_len(sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 reqlen = mptr - msg;
2375 *lenptr = len;
2376
2377 if(cmd->underflow && len != cmd->underflow){
2378 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2379 len, cmd->underflow);
2380 }
2381 } else {
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002382 *lenptr = len = 0;
2383 reqlen = 12;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 }
2385
2386 /* Stick the headers on */
2387 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2388
2389 // Send it on its way
2390 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2391 if (rcode == 0) {
2392 return 0;
2393 }
2394 return rcode;
2395}
2396
2397
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002398static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
Andrew Morton24601bb2007-12-10 15:49:20 -08002399{
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002400 struct Scsi_Host *host;
Andrew Morton24601bb2007-12-10 15:49:20 -08002401
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002402 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
Andrew Morton24601bb2007-12-10 15:49:20 -08002403 if (host == NULL) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002404 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
Andrew Morton24601bb2007-12-10 15:49:20 -08002405 return -1;
2406 }
2407 host->hostdata[0] = (unsigned long)pHba;
2408 pHba->host = host;
2409
2410 host->irq = pHba->pDev->irq;
2411 /* no IO ports, so don't have to set host->io_port and
2412 * host->n_io_port
2413 */
2414 host->io_port = 0;
2415 host->n_io_port = 0;
2416 /* see comments in scsi_host.h */
2417 host->max_id = 16;
2418 host->max_lun = 256;
2419 host->max_channel = pHba->top_scsi_channel + 1;
2420 host->cmd_per_lun = 1;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002421 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
Andrew Morton24601bb2007-12-10 15:49:20 -08002422 host->sg_tablesize = pHba->sg_tablesize;
2423 host->can_queue = pHba->post_fifo_size;
2424
2425 return 0;
2426}
2427
2428
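/*
 * Reply frame fields used below (byte offsets match the readl() calls):
 * word 0 carries the message flags (MSG_FAIL), offset 16 holds the detailed
 * status - low byte is the SCSI device status, high byte the HBA status -
 * and offset 28 onward holds any autosense data.  The detailed status is
 * mapped onto a Linux DID_* result, and sense data is copied back when the
 * device reports CHECK CONDITION.
 */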
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002429static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430{
2431 adpt_hba* pHba;
2432 u32 hba_status;
2433 u32 dev_status;
2434 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2435 // I know this would look cleaner if I just read bytes
2436 // but the model I have been using for all the rest of the
2437 // io is in 4 byte words - so I keep that model
2438 u16 detailed_status = readl(reply+16) &0xffff;
2439 dev_status = (detailed_status & 0xff);
2440 hba_status = detailed_status >> 8;
2441
2442 // calculate resid for sg
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002443 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444
2445 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2446
2447 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2448
2449 if(!(reply_flags & MSG_FAIL)) {
2450 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2451 case I2O_SCSI_DSC_SUCCESS:
2452 cmd->result = (DID_OK << 16);
2453 // handle underflow
2454 if(readl(reply+5) < cmd->underflow ) {
2455 cmd->result = (DID_ERROR <<16);
2456 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2457 }
2458 break;
2459 case I2O_SCSI_DSC_REQUEST_ABORTED:
2460 cmd->result = (DID_ABORT << 16);
2461 break;
2462 case I2O_SCSI_DSC_PATH_INVALID:
2463 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2464 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2465 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2466 case I2O_SCSI_DSC_NO_ADAPTER:
2467 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2468 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2469 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2470 cmd->result = (DID_TIME_OUT << 16);
2471 break;
2472 case I2O_SCSI_DSC_ADAPTER_BUSY:
2473 case I2O_SCSI_DSC_BUS_BUSY:
2474 cmd->result = (DID_BUS_BUSY << 16);
2475 break;
2476 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2477 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2478 cmd->result = (DID_RESET << 16);
2479 break;
2480 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2481 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2482 cmd->result = (DID_PARITY << 16);
2483 break;
2484 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2485 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2486 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2487 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2488 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2489 case I2O_SCSI_DSC_DATA_OVERRUN:
2490 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2491 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2492 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2493 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2494 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2495 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2496 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2497 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2498 case I2O_SCSI_DSC_INVALID_CDB:
2499 case I2O_SCSI_DSC_LUN_INVALID:
2500 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2501 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2502 case I2O_SCSI_DSC_NO_NEXUS:
2503 case I2O_SCSI_DSC_CDB_RECEIVED:
2504 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2505 case I2O_SCSI_DSC_QUEUE_FROZEN:
2506 case I2O_SCSI_DSC_REQUEST_INVALID:
2507 default:
2508 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2509 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2510 hba_status, dev_status, cmd->cmnd[0]);
2511 cmd->result = (DID_ERROR << 16);
2512 break;
2513 }
2514
2515 // copy over the request sense data if it was a check
2516 // condition status
Salyzyn, Markd814c512008-01-14 11:04:40 -08002517 if (dev_status == SAM_STAT_CHECK_CONDITION) {
FUJITA Tomonorib80ca4f2008-01-13 15:46:13 +09002518 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002520 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2522 cmd->sense_buffer[2] == DATA_PROTECT ){
2523 /* This is to handle an array failed */
2524 cmd->result = (DID_TIME_OUT << 16);
2525 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2526 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2527 hba_status, dev_status, cmd->cmnd[0]);
2528
2529 }
2530 }
2531 } else {
2532 /* In this condtion we could not talk to the tid
2533 * the card rejected it. We should signal a retry
2534 * for a limitted number of retries.
2535 */
2536 cmd->result = (DID_TIME_OUT << 16);
2537 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2538 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2539 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2540 }
2541
2542 cmd->result |= (dev_status);
2543
2544 if(cmd->scsi_done != NULL){
2545 cmd->scsi_done(cmd);
2546 }
2547 return cmd->result;
2548}
2549
2550
2551static s32 adpt_rescan(adpt_hba* pHba)
2552{
2553 s32 rcode;
2554 ulong flags = 0;
2555
2556 if(pHba->host)
2557 spin_lock_irqsave(pHba->host->host_lock, flags);
2558 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2559 goto out;
2560 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2561 goto out;
2562 rcode = 0;
2563out: if(pHba->host)
2564 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2565 return rcode;
2566}
2567
2568
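/*
 * Rescan pass (structure of the loops below): every known adpt_device is
 * first flagged DPTI_DEV_UNSCANNED, then the freshly read LCT is walked.
 * Visible entries (user_tid == 0xfff) of the three storage classes are
 * matched by bus/id/lun: new ones get i2o_device/adpt_device structures
 * allocated, existing ones are put back online and their TID refreshed if
 * it changed.  Whatever is still flagged unscanned at the end is marked
 * offline.
 */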
2569static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2570{
2571 int i;
2572 int max;
2573 int tid;
2574 struct i2o_device *d;
2575 i2o_lct *lct = pHba->lct;
2576 u8 bus_no = 0;
2577 s16 scsi_id;
2578 s16 scsi_lun;
2579 u32 buf[10]; // at least 8 u32's
2580 struct adpt_device* pDev = NULL;
2581 struct i2o_device* pI2o_dev = NULL;
2582
2583 if (lct == NULL) {
2584 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2585 return -1;
2586 }
2587
2588 max = lct->table_size;
2589 max -= 3;
2590 max /= 9;
2591
2592 // Mark each drive as unscanned
2593 for (d = pHba->devices; d; d = d->next) {
2594 pDev =(struct adpt_device*) d->owner;
2595 if(!pDev){
2596 continue;
2597 }
2598 pDev->state |= DPTI_DEV_UNSCANNED;
2599 }
2600
2601 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2602
2603 for(i=0;i<max;i++) {
2604 if( lct->lct_entry[i].user_tid != 0xfff){
2605 continue;
2606 }
2607
2608 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2609 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2610 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2611 tid = lct->lct_entry[i].tid;
2612 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2613 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2614 continue;
2615 }
2616 bus_no = buf[0]>>16;
2617 scsi_id = buf[1];
2618 scsi_lun = (buf[2]>>8 )&0xff;
2619 pDev = pHba->channel[bus_no].device[scsi_id];
2620 /* find the matching LUN */
2621 while(pDev) {
2622 if(pDev->scsi_lun == scsi_lun) {
2623 break;
2624 }
2625 pDev = pDev->next_lun;
2626 }
2627 if(!pDev ) { // Something new add it
Robert P. J. Day5cbded52006-12-13 00:35:56 -08002628 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 if(d==NULL)
2630 {
2631 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2632 return -ENOMEM;
2633 }
2634
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002635 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 d->next = NULL;
2637
2638 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2639
2640 d->flags = 0;
2641 adpt_i2o_report_hba_unit(pHba, d);
2642 adpt_i2o_install_device(pHba, d);
2643
2644 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2645 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2646 continue;
2647 }
2648 pDev = pHba->channel[bus_no].device[scsi_id];
2649 if( pDev == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302650 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 if(pDev == NULL) {
2652 return -ENOMEM;
2653 }
2654 pHba->channel[bus_no].device[scsi_id] = pDev;
2655 } else {
2656 while (pDev->next_lun) {
2657 pDev = pDev->next_lun;
2658 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05302659 pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 if(pDev == NULL) {
2661 return -ENOMEM;
2662 }
2663 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 pDev->tid = d->lct_data.tid;
2665 pDev->scsi_channel = bus_no;
2666 pDev->scsi_id = scsi_id;
2667 pDev->scsi_lun = scsi_lun;
2668 pDev->pI2o_dev = d;
2669 d->owner = pDev;
2670 pDev->type = (buf[0])&0xff;
2671 pDev->flags = (buf[0]>>8)&0xff;
2672 // Too late, SCSI system has made up its mind, but what the hey ...
2673 if(scsi_id > pHba->top_scsi_id){
2674 pHba->top_scsi_id = scsi_id;
2675 }
2676 if(scsi_lun > pHba->top_scsi_lun){
2677 pHba->top_scsi_lun = scsi_lun;
2678 }
2679 continue;
2680 } // end of new i2o device
2681
2682 // We found an old device - check it
2683 while(pDev) {
2684 if(pDev->scsi_lun == scsi_lun) {
2685 if(!scsi_device_online(pDev->pScsi_dev)) {
2686 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2687 pHba->name,bus_no,scsi_id,scsi_lun);
2688 if (pDev->pScsi_dev) {
2689 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2690 }
2691 }
2692 d = pDev->pI2o_dev;
2693 if(d->lct_data.tid != tid) { // something changed
2694 pDev->tid = tid;
2695 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2696 if (pDev->pScsi_dev) {
2697 pDev->pScsi_dev->changed = TRUE;
2698 pDev->pScsi_dev->removable = TRUE;
2699 }
2700 }
2701 // Found it - mark it scanned
2702 pDev->state = DPTI_DEV_ONLINE;
2703 break;
2704 }
2705 pDev = pDev->next_lun;
2706 }
2707 }
2708 }
2709 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2710 pDev =(struct adpt_device*) pI2o_dev->owner;
2711 if(!pDev){
2712 continue;
2713 }
2714 // Take offline any drives that previously existed but could not be found
2715 // in the LCT table
2716 if (pDev->state & DPTI_DEV_UNSCANNED){
2717 pDev->state = DPTI_DEV_OFFLINE;
2718 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2719 if (pDev->pScsi_dev) {
2720 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2721 }
2722 }
2723 }
2724 return 0;
2725}
2726
2727static void adpt_fail_posted_scbs(adpt_hba* pHba)
2728{
2729 struct scsi_cmnd* cmd = NULL;
2730 struct scsi_device* d = NULL;
2731
2732 shost_for_each_device(d, pHba->host) {
2733 unsigned long flags;
2734 spin_lock_irqsave(&d->list_lock, flags);
2735 list_for_each_entry(cmd, &d->cmd_list, list) {
2736 if(cmd->serial_number == 0){
2737 continue;
2738 }
2739 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2740 cmd->scsi_done(cmd);
2741 }
2742 spin_unlock_irqrestore(&d->list_lock, flags);
2743 }
2744}
2745
2746
2747/*============================================================================
2748 * Routines from i2o subsystem
2749 *============================================================================
2750 */
2751
2752
2753
2754/*
2755 * Bring an I2O controller into HOLD state. See the spec.
2756 */
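/*
 * Sequence used below: read the status block (resetting the IOP first when
 * it does not respond or is already in READY/OPERATIONAL/HOLD/FAILED state;
 * a FAULTED IOP is a hard error), then initialize the outbound reply queue
 * - after which the IOP is in HOLD state - and fetch the hardware resource
 * table.
 */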
2757static int adpt_i2o_activate_hba(adpt_hba* pHba)
2758{
2759 int rcode;
2760
2761 if(pHba->initialized ) {
2762 if (adpt_i2o_status_get(pHba) < 0) {
2763 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2764 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2765 return rcode;
2766 }
2767 if (adpt_i2o_status_get(pHba) < 0) {
2768 printk(KERN_INFO "HBA not responding.\n");
2769 return -1;
2770 }
2771 }
2772
2773 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2774 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2775 return -1;
2776 }
2777
2778 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2779 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2780 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2781 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2782 adpt_i2o_reset_hba(pHba);
2783 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2784 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2785 return -1;
2786 }
2787 }
2788 } else {
2789 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2790 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2791 return rcode;
2792 }
2793
2794 }
2795
2796 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2797 return -1;
2798 }
2799
2800 /* In HOLD state */
2801
2802 if (adpt_i2o_hrt_get(pHba) < 0) {
2803 return -1;
2804 }
2805
2806 return 0;
2807}
2808
2809/*
2810 * Bring a controller online into OPERATIONAL state.
2811 */
2812
2813static int adpt_i2o_online_hba(adpt_hba* pHba)
2814{
2815 if (adpt_i2o_systab_send(pHba) < 0) {
2816 adpt_i2o_delete_hba(pHba);
2817 return -1;
2818 }
2819 /* In READY state */
2820
2821 if (adpt_i2o_enable_hba(pHba) < 0) {
2822 adpt_i2o_delete_hba(pHba);
2823 return -1;
2824 }
2825
2826 /* In OPERATIONAL state */
2827 return 0;
2828}
2829
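/*
 * adpt_send_nop() disposes of a message frame the driver does not want to
 * use: given a frame offset (or EMPTY_QUEUE, in which case it waits for
 * one), it writes a three-word I2O_CMD_UTIL_NOP into the frame and posts
 * it so the IOP can reclaim the MFA.
 */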
2830static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2831{
2832 u32 __iomem *msg;
2833 ulong timeout = jiffies + 5*HZ;
2834
2835 while(m == EMPTY_QUEUE){
2836 rmb();
2837 m = readl(pHba->post_port);
2838 if(m != EMPTY_QUEUE){
2839 break;
2840 }
2841 if(time_after(jiffies,timeout)){
2842 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2843 return 2;
2844 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002845 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 }
2847 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2848 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2849 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2850 writel( 0,&msg[2]);
2851 wmb();
2852
2853 writel(m, pHba->post_port);
2854 wmb();
2855 return 0;
2856}
2857
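/*
 * Outbound queue setup (what the function below does): post an
 * I2O_CMD_OUTBOUND_INIT frame whose single SG element points at a 4-byte
 * DMA-coherent status cell, poll that cell until the IOP reports
 * completion (0x04), then allocate the reply frame pool and prime the
 * reply FIFO by writing the bus address of every frame to the reply port.
 */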
2858static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2859{
2860 u8 *status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002861 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 u32 __iomem *msg = NULL;
2863 int i;
2864 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 u32 m;
2866
2867 do {
2868 rmb();
2869 m = readl(pHba->post_port);
2870 if (m != EMPTY_QUEUE) {
2871 break;
2872 }
2873
2874 if(time_after(jiffies,timeout)){
2875 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2876 return -ETIMEDOUT;
2877 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002878 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 } while(m == EMPTY_QUEUE);
2880
2881 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2882
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002883 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002884 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 adpt_send_nop(pHba, m);
2886 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2887 pHba->name);
2888 return -ENOMEM;
2889 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002890 memset(status, 0, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891
2892 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2893 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2894 writel(0, &msg[2]);
2895 writel(0x0106, &msg[3]); /* Transaction context */
2896 writel(4096, &msg[4]); /* Host page frame size */
2897 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2898 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002899 writel((u32)addr, &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
2901 writel(m, pHba->post_port);
2902 wmb();
2903
2904 // Wait for the reply status to come back
2905 do {
2906 if (*status) {
2907 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2908 break;
2909 }
2910 }
2911 rmb();
2912 if(time_after(jiffies,timeout)){
2913 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002914 /* We lose 4 bytes of "status" here, but we
2915 cannot free them because the controller may
2916 wake up and corrupt those bytes at any time */
2917 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 return -ETIMEDOUT;
2919 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002920 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 } while (1);
2922
2923 // If the command was successful, fill the fifo with our reply
2924 // message packets
2925 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002926 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 return -2;
2928 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002929 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002931 if(pHba->reply_pool != NULL) {
2932 dma_free_coherent(&pHba->pDev->dev,
2933 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2934 pHba->reply_pool, pHba->reply_pool_pa);
2935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002937 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2938 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2939 &pHba->reply_pool_pa, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002940 if (!pHba->reply_pool) {
2941 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2942 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002944 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 for(i = 0; i < pHba->reply_fifo_size; i++) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002947 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2948 pHba->reply_port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 }
2951 adpt_i2o_status_get(pHba);
2952 return 0;
2953}
2954
2955
2956/*
2957 * I2O System Table. Contains information about
2958 * all the IOPs in the system. Used to inform IOPs
2959 * about each other's existence.
2960 *
2961 * sys_tbl_ver is the CurrentChangeIndicator that is
2962 * used by IOPs to track changes.
2963 */
2964
2965
2966
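/*
 * adpt_i2o_status_get() below posts an I2O_CMD_STATUS_GET frame aimed at a
 * DMA-coherent status block and polls byte 87 of that block until the IOP
 * fills it in.  The block is then used to size the inbound/outbound FIFOs
 * and the per-command scatter/gather table (smaller in the 64-bit SGL case,
 * where each element needs an extra address word).
 */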
2967static s32 adpt_i2o_status_get(adpt_hba* pHba)
2968{
2969 ulong timeout;
2970 u32 m;
2971 u32 __iomem *msg;
2972 u8 *status_block=NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
2974 if(pHba->status_block == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002975 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2976 sizeof(i2o_status_block),
2977 &pHba->status_block_pa, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 if(pHba->status_block == NULL) {
2979 printk(KERN_ERR
2980 "dpti%d: Get Status Block failed; Out of memory. \n",
2981 pHba->unit);
2982 return -ENOMEM;
2983 }
2984 }
2985 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2986 status_block = (u8*)(pHba->status_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2988 do {
2989 rmb();
2990 m = readl(pHba->post_port);
2991 if (m != EMPTY_QUEUE) {
2992 break;
2993 }
2994 if(time_after(jiffies,timeout)){
2995 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2996 pHba->name);
2997 return -ETIMEDOUT;
2998 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002999 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 } while(m==EMPTY_QUEUE);
3001
3002
3003 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3004
3005 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3006 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3007 writel(1, &msg[2]);
3008 writel(0, &msg[3]);
3009 writel(0, &msg[4]);
3010 writel(0, &msg[5]);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003011 writel( dma_low(pHba->status_block_pa), &msg[6]);
3012 writel( dma_high(pHba->status_block_pa), &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3014
3015 //post message
3016 writel(m, pHba->post_port);
3017 wmb();
3018
3019 while(status_block[87]!=0xff){
3020 if(time_after(jiffies,timeout)){
3021 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3022 pHba->unit);
3023 return -ETIMEDOUT;
3024 }
3025 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08003026 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 }
3028
3029 // Set up our number of outbound and inbound messages
3030 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3031 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3032 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3033 }
3034
3035 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3036 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3037 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3038 }
3039
3040 // Calculate the Scatter Gather list size
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003041 if (dpt_dma64(pHba)) {
3042 pHba->sg_tablesize
3043 = ((pHba->status_block->inbound_frame_size * 4
3044 - 14 * sizeof(u32))
3045 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3046 } else {
3047 pHba->sg_tablesize
3048 = ((pHba->status_block->inbound_frame_size * 4
3049 - 12 * sizeof(u32))
3050 / sizeof(struct sg_simple_element));
3051 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3053 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3054 }
3055
3056
3057#ifdef DEBUG
3058 printk("dpti%d: State = ",pHba->unit);
3059 switch(pHba->status_block->iop_state) {
3060 case 0x01:
3061 printk("INIT\n");
3062 break;
3063 case 0x02:
3064 printk("RESET\n");
3065 break;
3066 case 0x04:
3067 printk("HOLD\n");
3068 break;
3069 case 0x05:
3070 printk("READY\n");
3071 break;
3072 case 0x08:
3073 printk("OPERATIONAL\n");
3074 break;
3075 case 0x10:
3076 printk("FAILED\n");
3077 break;
3078 case 0x11:
3079 printk("FAULTED\n");
3080 break;
3081 default:
3082 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3083 }
3084#endif
3085 return 0;
3086}
3087
3088/*
3089 * Get the IOP's Logical Configuration Table
3090 */
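/*
 * The LCT is fetched with I2O_CMD_LCT_NOTIFY into a DMA-coherent buffer
 * sized from the status block's expected_lct_size; if the IOP reports a
 * larger table_size the buffer is freed and the request repeated with the
 * bigger size.  The scalar query at the end (DPT group 0x8000) locates and
 * ioremaps the controller's firmware debug buffer.
 */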
3091static int adpt_i2o_lct_get(adpt_hba* pHba)
3092{
3093 u32 msg[8];
3094 int ret;
3095 u32 buf[16];
3096
3097 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3098 pHba->lct_size = pHba->status_block->expected_lct_size;
3099 }
3100 do {
3101 if (pHba->lct == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003102 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3103 pHba->lct_size, &pHba->lct_pa,
3104 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 if(pHba->lct == NULL) {
3106 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3107 pHba->name);
3108 return -ENOMEM;
3109 }
3110 }
3111 memset(pHba->lct, 0, pHba->lct_size);
3112
3113 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3114 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3115 msg[2] = 0;
3116 msg[3] = 0;
3117 msg[4] = 0xFFFFFFFF; /* All devices */
3118 msg[5] = 0x00000000; /* Report now */
3119 msg[6] = 0xD0000000|pHba->lct_size;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003120 msg[7] = (u32)pHba->lct_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121
3122 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3123 printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3124 pHba->name, ret);
3125 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3126 return ret;
3127 }
3128
3129 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3130 pHba->lct_size = pHba->lct->table_size << 2;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003131 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3132 pHba->lct, pHba->lct_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 pHba->lct = NULL;
3134 }
3135 } while (pHba->lct == NULL);
3136
3137 PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3138
3139
3140 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3141 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3142 pHba->FwDebugBufferSize = buf[1];
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003143 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3144 pHba->FwDebugBufferSize);
3145 if (pHba->FwDebugBuffer_P) {
3146 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3147 FW_DEBUG_FLAGS_OFFSET;
3148 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3149 FW_DEBUG_BLED_OFFSET;
3150 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3151 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3152 FW_DEBUG_STR_LENGTH_OFFSET;
3153 pHba->FwDebugBuffer_P += buf[2];
3154 pHba->FwDebugFlags = 0;
3155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 }
3157
3158 return 0;
3159}
3160
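/*
 * Build one i2o_sys_tbl describing every registered HBA: any old table is
 * freed, a new DMA-coherent table with hba_count entries is allocated, and
 * each IOP contributes its org/IOP id, I2O version, state, inbound frame
 * size and the 64-bit address of its inbound queue (base_addr_phys + 0x40,
 * split into inbound_low/inbound_high).
 */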
3161static int adpt_i2o_build_sys_table(void)
3162{
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003163 adpt_hba* pHba = hba_chain;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 int count = 0;
3165
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003166 if (sys_tbl)
3167 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3168 sys_tbl, sys_tbl_pa);
3169
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3171 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3172
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003173 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3174 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02003175 if (!sys_tbl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3177 return -ENOMEM;
3178 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003179 memset(sys_tbl, 0, sys_tbl_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180
3181 sys_tbl->num_entries = hba_count;
3182 sys_tbl->version = I2OVERSION;
3183 sys_tbl->change_ind = sys_tbl_ind++;
3184
3185 for(pHba = hba_chain; pHba; pHba = pHba->next) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003186 u64 addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 // Get updated Status Block so we have the latest information
3188 if (adpt_i2o_status_get(pHba)) {
3189 sys_tbl->num_entries--;
3190 continue; // try next one
3191 }
3192
3193 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3194 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3195 sys_tbl->iops[count].seg_num = 0;
3196 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3197 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3198 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3199 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3200 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3201 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003202 addr = pHba->base_addr_phys + 0x40;
3203 sys_tbl->iops[count].inbound_low = dma_low(addr);
3204 sys_tbl->iops[count].inbound_high = dma_high(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205
3206 count++;
3207 }
3208
3209#ifdef DEBUG
3210{
3211 u32 *table = (u32*)sys_tbl;
3212 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3213 for(count = 0; count < (sys_tbl_len >>2); count++) {
3214 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3215 count, table[count]);
3216 }
3217}
3218#endif
3219
3220 return 0;
3221}
3222
3223
3224/*
3225 * Dump the information block associated with a given unit (TID)
3226 */
3227
3228static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3229{
3230 char buf[64];
3231 int unit = d->lct_data.tid;
3232
3233 printk(KERN_INFO "TID %3.3d ", unit);
3234
3235 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3236 {
3237 buf[16]=0;
3238 printk(" Vendor: %-12.12s", buf);
3239 }
3240 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3241 {
3242 buf[16]=0;
3243 printk(" Device: %-12.12s", buf);
3244 }
3245 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3246 {
3247 buf[8]=0;
3248 printk(" Rev: %-12.12s\n", buf);
3249 }
3250#ifdef DEBUG
3251 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3252 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3253 printk(KERN_INFO "\tFlags: ");
3254
3255 if(d->lct_data.device_flags&(1<<0))
3256 printk("C"); // ConfigDialog requested
3257 if(d->lct_data.device_flags&(1<<1))
3258 printk("U"); // Multi-user capable
3259 if(!(d->lct_data.device_flags&(1<<4)))
3260 printk("P"); // Peer service enabled!
3261 if(!(d->lct_data.device_flags&(1<<5)))
3262 printk("M"); // Mgmt service enabled!
3263 printk("\n");
3264#endif
3265}
3266
3267#ifdef DEBUG
3268/*
3269 * Do i2o class name lookup
3270 */
3271static const char *adpt_i2o_get_class_name(int class)
3272{
3273 int idx = 16;
3274 static char *i2o_class_name[] = {
3275 "Executive",
3276 "Device Driver Module",
3277 "Block Device",
3278 "Tape Device",
3279 "LAN Interface",
3280 "WAN Interface",
3281 "Fibre Channel Port",
3282 "Fibre Channel Device",
3283 "SCSI Device",
3284 "ATE Port",
3285 "ATE Device",
3286 "Floppy Controller",
3287 "Floppy Device",
3288 "Secondary Bus Port",
3289 "Peer Transport Agent",
3290 "Peer Transport",
3291 "Unknown"
3292 };
3293
3294 switch(class&0xFFF) {
3295 case I2O_CLASS_EXECUTIVE:
3296 idx = 0; break;
3297 case I2O_CLASS_DDM:
3298 idx = 1; break;
3299 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3300 idx = 2; break;
3301 case I2O_CLASS_SEQUENTIAL_STORAGE:
3302 idx = 3; break;
3303 case I2O_CLASS_LAN:
3304 idx = 4; break;
3305 case I2O_CLASS_WAN:
3306 idx = 5; break;
3307 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3308 idx = 6; break;
3309 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3310 idx = 7; break;
3311 case I2O_CLASS_SCSI_PERIPHERAL:
3312 idx = 8; break;
3313 case I2O_CLASS_ATE_PORT:
3314 idx = 9; break;
3315 case I2O_CLASS_ATE_PERIPHERAL:
3316 idx = 10; break;
3317 case I2O_CLASS_FLOPPY_CONTROLLER:
3318 idx = 11; break;
3319 case I2O_CLASS_FLOPPY_DEVICE:
3320 idx = 12; break;
3321 case I2O_CLASS_BUS_ADAPTER_PORT:
3322 idx = 13; break;
3323 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3324 idx = 14; break;
3325 case I2O_CLASS_PEER_TRANSPORT:
3326 idx = 15; break;
3327 }
3328 return i2o_class_name[idx];
3329}
3330#endif
3331
3332
3333static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3334{
3335 u32 msg[6];
3336 int ret, size = sizeof(i2o_hrt);
3337
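	/*
	 * Size probe loop: start with room for a bare i2o_hrt, ask the IOP to
	 * dump its HRT, and if the reported num_entries * entry_len (in 32-bit
	 * words) does not fit, free the buffer, grow 'size' and retry until
	 * the whole table has been fetched.
	 */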
3338 do {
3339 if (pHba->hrt == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003340 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3341 size, &pHba->hrt_pa, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 if (pHba->hrt == NULL) {
3343 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3344 return -ENOMEM;
3345 }
3346 }
3347
3348 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3349 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3350 msg[2]= 0;
3351 msg[3]= 0;
3352 msg[4]= (0xD0000000 | size); /* Simple transaction */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003353 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
3355 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3356 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3357 return ret;
3358 }
3359
3360 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003361 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3362 dma_free_coherent(&pHba->pDev->dev, size,
3363 pHba->hrt, pHba->hrt_pa);
3364 size = newsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 pHba->hrt = NULL;
3366 }
3367 } while(pHba->hrt == NULL);
3368 return 0;
3369}
3370
3371/*
3372 * Query one scalar group value or a whole scalar group.
3373 */
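/*
 * Usage sketch (illustrative): pass a field index to read one value, or
 * field == -1 to read the whole group, e.g.
 *
 *	char vendor[17];
 *	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, vendor, 16) >= 0)
 *		vendor[16] = '\0';	// group 0xF100 field 3: vendor id string
 *
 * The caller's buffer receives at most 'buflen' bytes; the 8-byte result
 * header is stripped off before the data is copied back.
 */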
3374static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3375 int group, int field, void *buf, int buflen)
3376{
3377 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003378 u8 *opblk_va;
3379 dma_addr_t opblk_pa;
3380 u8 *resblk_va;
3381 dma_addr_t resblk_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
3383 int size;
3384
3385 /* 8 bytes for header */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003386 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3387 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3388 if (resblk_va == NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3390 return -ENOMEM;
3391 }
3392
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003393 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3394 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3395 if (opblk_va == NULL) {
3396 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3397 resblk_va, resblk_pa);
3398 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3399 pHba->name);
3400 return -ENOMEM;
3401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 if (field == -1) /* whole group */
3403 opblk[4] = -1;
3404
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003405 memcpy(opblk_va, opblk, sizeof(opblk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003407 opblk_va, opblk_pa, sizeof(opblk),
3408 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3409 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 if (size == -ETIME) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003411 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3412 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3414 return -ETIME;
3415 } else if (size == -EINTR) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003416 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3417 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3419 return -EINTR;
3420 }
3421
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003422 memcpy(buf, resblk_va+8, buflen); /* cut off header */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003424 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3425 resblk_va, resblk_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 if (size < 0)
3427 return size;
3428
3429 return buflen;
3430}
3431
3432
3433/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3434 *
3435 * This function can be used for all UtilParamsGet/Set operations.
3436 * The OperationBlock is given in opblk-buffer,
3437 * and results are returned in resblk-buffer.
3438 * Note that the minimum sized resblk is 8 bytes and contains
3439 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3440 */
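/*
 * Sketch of the 8-byte result header as this function decodes it (two
 * 32-bit words; layout inferred only from the field extraction below):
 *
 *	u32 *res = (u32 *)resblk_va;
 *	u16 block_size      = res[1] & 0xFFFF;		// BlockSize, in 32-bit words
 *	u8  block_status    = (res[1] >> 16) & 0xFF;	// 0 means SUCCESS
 *	u8  error_info_size = res[1] >> 24;		// ErrorInfoSize
 *
 * On success the return value is 4 + (block_size << 2), i.e. the number of
 * bytes actually used in resblk.
 */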
3441static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003442 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3443 void *resblk_va, dma_addr_t resblk_pa, int reslen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444{
3445 u32 msg[9];
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003446 u32 *res = (u32 *)resblk_va;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 int wait_status;
3448
3449 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3450 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3451 msg[2] = 0;
3452 msg[3] = 0;
3453 msg[4] = 0;
3454 msg[5] = 0x54000000 | oplen; /* OperationBlock */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003455 msg[6] = (u32)opblk_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003457 msg[8] = (u32)resblk_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
3459 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003460 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 return wait_status; /* -DetailedStatus */
3462 }
3463
3464 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3465 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3466 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3467 pHba->name,
3468 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3469 : "PARAMS_GET",
3470 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3471 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3472 }
3473
3474 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3475}
3476
3477
3478static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3479{
3480 u32 msg[4];
3481 int ret;
3482
3483 adpt_i2o_status_get(pHba);
3484
3485 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3486
3487 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3488 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3489 return 0;
3490 }
3491
3492 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3493 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3494 msg[2] = 0;
3495 msg[3] = 0;
3496
3497 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3498 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3499 pHba->unit, -ret);
3500 } else {
3501 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3502 }
3503
3504 adpt_i2o_status_get(pHba);
3505 return ret;
3506}
3507
3508
3509/*
3510 * Enable IOP. Allows the IOP to resume external operations.
3511 */
3512static int adpt_i2o_enable_hba(adpt_hba* pHba)
3513{
3514 u32 msg[4];
3515 int ret;
3516
3517 adpt_i2o_status_get(pHba);
3518 if(!pHba->status_block){
3519 return -ENOMEM;
3520 }
3521 /* Enable only allowed on READY state */
3522 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3523 return 0;
3524
3525 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3526 return -EINVAL;
3527
3528 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3529 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3530 msg[2]= 0;
3531 msg[3]= 0;
3532
3533 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3534 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3535 pHba->name, ret);
3536 } else {
3537 PDEBUG("%s: Enabled.\n", pHba->name);
3538 }
3539
3540 adpt_i2o_status_get(pHba);
3541 return ret;
3542}
3543
3544
3545static int adpt_i2o_systab_send(adpt_hba* pHba)
3546{
3547 u32 msg[12];
3548 int ret;
3549
3550 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3551 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3552 msg[2] = 0;
3553 msg[3] = 0;
3554 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3555 msg[5] = 0; /* Segment 0 */
3556
3557 /*
3558 * Provide three SGL-elements:
3559 * System table (SysTab), Private memory space declaration and
3560 * Private i/o space declaration
3561 */
3562 msg[6] = 0x54000000 | sys_tbl_len;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003563 msg[7] = (u32)sys_tbl_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 msg[8] = 0x54000000 | 0;
3565 msg[9] = 0;
3566 msg[10] = 0xD4000000 | 0;
3567 msg[11] = 0;
3568
3569 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3570 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3571 pHba->name, ret);
3572 }
3573#ifdef DEBUG
3574 else {
3575 PINFO("%s: SysTab set.\n", pHba->name);
3576 }
3577#endif
3578
3579 return ret;
3580}
3581
3582
3583/*============================================================================
3584 *
3585 *============================================================================
3586 */
3587
3588
3589#ifdef UARTDELAY
3590
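/*
 * Busy-wait for 'millisec' milliseconds in 1 ms udelay() steps (roughly
 * equivalent to mdelay()); only built when UARTDELAY is defined.
 */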
3591static void adpt_delay(int millisec)
3592{
3593 int i;
3594 for (i = 0; i < millisec; i++) {
3595 udelay(1000); /* delay for one millisecond */
3596 }
3597}
3598
3599#endif
3600
Andrew Morton24601bb2007-12-10 15:49:20 -08003601static struct scsi_host_template driver_template = {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02003602 .module = THIS_MODULE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 .name = "dpt_i2o",
3604 .proc_name = "dpt_i2o",
3605 .proc_info = adpt_proc_info,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 .info = adpt_info,
3607 .queuecommand = adpt_queue,
3608 .eh_abort_handler = adpt_abort,
3609 .eh_device_reset_handler = adpt_device_reset,
3610 .eh_bus_reset_handler = adpt_bus_reset,
3611 .eh_host_reset_handler = adpt_reset,
3612 .bios_param = adpt_bios_param,
3613 .slave_configure = adpt_slave_configure,
3614 .can_queue = MAX_TO_IOP_MESSAGES,
3615 .this_id = 7,
3616 .cmd_per_lun = 1,
3617 .use_clustering = ENABLE_CLUSTERING,
3618};
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02003619
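/*
 * Module init: adpt_detect() probes and sets up all DPT/Adaptec I2O
 * controllers, then each detected HBA is registered with the SCSI midlayer
 * and scanned; if registration fails, the hosts are removed again and the
 * error is returned.
 */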
3620static int __init adpt_init(void)
3621{
3622 int error;
3623 adpt_hba *pHba, *next;
3624
3625 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3626
3627 error = adpt_detect(&driver_template);
3628 if (error < 0)
3629 return error;
3630 if (hba_chain == NULL)
3631 return -ENODEV;
3632
3633 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3634 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3635 if (error)
3636 goto fail;
3637 scsi_scan_host(pHba->host);
3638 }
3639 return 0;
3640fail:
3641 for (pHba = hba_chain; pHba; pHba = next) {
3642 next = pHba->next;
3643 scsi_remove_host(pHba->host);
3644 }
3645 return error;
3646}
3647
3648static void __exit adpt_exit(void)
3649{
3650 adpt_hba *pHba, *next;
3651
3652 for (pHba = hba_chain; pHba; pHba = pHba->next)
3653 scsi_remove_host(pHba->host);
3654 for (pHba = hba_chain; pHba; pHba = next) {
3655 next = pHba->next;
3656 adpt_release(pHba->host);
3657 }
3658}
3659
3660module_init(adpt_init);
3661module_exit(adpt_exit);
3662
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663MODULE_LICENSE("GPL");