/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
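
/*
 * Each waiter takes a 15-bit id from adpt_post_wait_id; the id is placed in
 * the context word of the outgoing message (msg[2]) and echoed back in the
 * reply, letting adpt_i2o_post_wait_complete() locate the matching entry on
 * this queue and wake the sleeper with the completion status.
 */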


/*============================================================================
 * Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
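
/*
 * When dpt_dma64() is true the controller is handed 64-bit scatter/gather
 * elements, so a dma_addr_t is emitted as two 32-bit words, e.g.
 *	*mptr++ = dma_low(addr);
 *	*mptr++ = dma_high(addr);
 * as done in adpt_inquiry() below.
 */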
167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168static u8 adpt_read_blink_led(adpt_hba* host)
169{
Harvey Harrison172c1222008-04-28 16:50:03 -0700170 if (host->FwDebugBLEDflag_P) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 return readb(host->FwDebugBLEDvalue_P);
173 }
174 }
175 return 0;
176}
177
178/*============================================================================
179 * Scsi host template interface functions
180 *============================================================================
181 */
182
183static struct pci_device_id dptids[] = {
184 { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
185 { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
186 { 0, }
187};
188MODULE_DEVICE_TABLE(pci,dptids);
189
Andrew Morton24601bb2007-12-10 15:49:20 -0800190static int adpt_detect(struct scsi_host_template* sht)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
192 struct pci_dev *pDev = NULL;
Dan Carpenter229bab62010-03-15 11:26:56 +0300193 adpt_hba *pHba;
194 adpt_hba *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 PINFO("Detecting Adaptec I2O RAID controllers...\n");
197
198 /* search for all Adatpec I2O RAID cards */
Alan Coxa07f3532006-09-15 15:34:32 +0100199 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 if(pDev->device == PCI_DPT_DEVICE_ID ||
201 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
Andrew Morton24601bb2007-12-10 15:49:20 -0800202 if(adpt_install_hba(sht, pDev) ){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 PERROR("Could not Init an I2O RAID device\n");
204 PERROR("Will not try to detect others.\n");
205 return hba_count-1;
206 }
Alan Coxa07f3532006-09-15 15:34:32 +0100207 pci_dev_get(pDev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 }
209 }
210
211 /* In INIT state, Activate IOPs */
Dan Carpenter229bab62010-03-15 11:26:56 +0300212 for (pHba = hba_chain; pHba; pHba = next) {
213 next = pHba->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 // Activate does get status , init outbound, and get hrt
215 if (adpt_i2o_activate_hba(pHba) < 0) {
216 adpt_i2o_delete_hba(pHba);
217 }
218 }
219
220
221 /* Active IOPs in HOLD state */
222
223rebuild_sys_tab:
224 if (hba_chain == NULL)
225 return 0;
226
227 /*
228 * If build_sys_table fails, we kill everything and bail
229 * as we can't init the IOPs w/o a system table
230 */
231 if (adpt_i2o_build_sys_table() < 0) {
232 adpt_i2o_sys_shutdown();
233 return 0;
234 }
235
236 PDEBUG("HBA's in HOLD state\n");
237
238 /* If IOP don't get online, we need to rebuild the System table */
239 for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 if (adpt_i2o_online_hba(pHba) < 0) {
241 adpt_i2o_delete_hba(pHba);
242 goto rebuild_sys_tab;
243 }
244 }
245
246 /* Active IOPs now in OPERATIONAL state */
247 PDEBUG("HBA's in OPERATIONAL state\n");
248
249 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
Dan Carpenter229bab62010-03-15 11:26:56 +0300250 for (pHba = hba_chain; pHba; pHba = next) {
251 next = pHba->next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 if (adpt_i2o_lct_get(pHba) < 0){
254 adpt_i2o_delete_hba(pHba);
255 continue;
256 }
257
258 if (adpt_i2o_parse_lct(pHba) < 0){
259 adpt_i2o_delete_hba(pHba);
260 continue;
261 }
262 adpt_inquiry(pHba);
263 }
264
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200265 adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 if (IS_ERR(adpt_sysfs_class)) {
267 printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 adpt_sysfs_class = NULL;
269 }
270
Dan Carpenter229bab62010-03-15 11:26:56 +0300271 for (pHba = hba_chain; pHba; pHba = next) {
272 next = pHba->next;
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +0200273 if (adpt_scsi_host_alloc(pHba, sht) < 0){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 adpt_i2o_delete_hba(pHba);
275 continue;
276 }
277 pHba->initialized = TRUE;
278 pHba->state &= ~DPTI_STATE_RESET;
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200279 if (adpt_sysfs_class) {
Greg Kroah-Hartmand73a1a62008-07-21 20:03:34 -0700280 struct device *dev = device_create(adpt_sysfs_class,
Greg Kroah-Hartman9def0b92008-05-21 12:52:33 -0700281 NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
Miquel van Smoorenburg1ed43912008-05-02 01:08:19 +0200282 "dpti%d", pHba->unit);
283 if (IS_ERR(dev)) {
284 printk(KERN_WARNING"dpti%d: unable to "
285 "create device in dpt_i2o class\n",
286 pHba->unit);
287 }
288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 }
290
291 // Register our control device node
292 // nodes will need to be created in /dev to access this
293 // the nodes can not be created from within the driver
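	// (example only: a node would typically be created from userspace with
	//  something like "mknod /dev/dpti0 c <DPTI_I2O_MAJOR> 0", using the
	//  major number defined in dpti.h and one minor per adapter unit)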
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
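
/*
 * DEF_SCSI_QCMD() expands to the adpt_queue() wrapper used as the host
 * template's queuecommand entry point: it takes the Scsi_Host lock and then
 * calls adpt_queue_lck() above with that lock held (on kernels of this
 * vintage it also assigns cmd->serial_number, which adpt_cmd_to_context()
 * depends on).
 */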

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x4000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_printf(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
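
/*
 * On 32-bit builds the ioctl reply pointer itself fits in the 32-bit message
 * context and is passed through unchanged; on 64-bit builds an index into
 * pHba->ioctl_reply_context[] is handed to the firmware instead and mapped
 * back to the pointer here.
 */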

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


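/*
 * Post a single message to the IOP's inbound queue: poll the post port for
 * a free message-frame offset, copy the message into that frame with
 * memcpy_toio(), then write the offset back to the post port to hand the
 * frame to the firmware.
 */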
1284static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1285{
1286
1287 u32 m = EMPTY_QUEUE;
1288 u32 __iomem *msg;
1289 ulong timeout = jiffies + 30*HZ;
1290 do {
1291 rmb();
1292 m = readl(pHba->post_port);
1293 if (m != EMPTY_QUEUE) {
1294 break;
1295 }
1296 if(time_after(jiffies,timeout)){
1297 printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1298 return -ETIMEDOUT;
1299 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001300 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 } while(m == EMPTY_QUEUE);
1302
1303 msg = pHba->msg_addr_virt + m;
1304 memcpy_toio(msg, data, len);
1305 wmb();
1306
1307 //post message
1308 writel(m, pHba->post_port);
1309 wmb();
1310
1311 return 0;
1312}
1313
1314
1315static void adpt_i2o_post_wait_complete(u32 context, int status)
1316{
1317 struct adpt_i2o_post_wait_data *p1 = NULL;
1318 /*
1319 * We need to search through the adpt_post_wait
1320 * queue to see if the given message is still
1321 * outstanding. If not, it means that the IOP
1322 * took longer to respond to the message than we
1323 * had allowed and timer has already expired.
1324 * Not much we can do about that except log
1325 * it for debug purposes, increase timeout, and recompile
1326 *
1327 * Lock needed to keep anyone from moving queue pointers
1328 * around while we're looking through them.
1329 */
1330
1331 context &= 0x7fff;
1332
1333 spin_lock(&adpt_post_wait_lock);
1334 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1335 if(p1->id == context) {
1336 p1->status = status;
1337 spin_unlock(&adpt_post_wait_lock);
1338 wake_up_interruptible(p1->wq);
1339 return;
1340 }
1341 }
1342 spin_unlock(&adpt_post_wait_lock);
1343 // If this happens we lose commands that probably really completed
1344 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1345 printk(KERN_DEBUG" Tasks in wait queue:\n");
1346 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1347 printk(KERN_DEBUG" %d\n",p1->id);
1348 }
1349 return;
1350}
1351
1352static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1353{
1354 u32 msg[8];
1355 u8* status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001356 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 u32 m = EMPTY_QUEUE ;
1358 ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1359
1360 if(pHba->initialized == FALSE) { // First time reset should be quick
1361 timeout = jiffies + (25*HZ);
1362 } else {
1363 adpt_i2o_quiesce_hba(pHba);
1364 }
1365
1366 do {
1367 rmb();
1368 m = readl(pHba->post_port);
1369 if (m != EMPTY_QUEUE) {
1370 break;
1371 }
1372 if(time_after(jiffies,timeout)){
1373 printk(KERN_WARNING"Timeout waiting for message!\n");
1374 return -ETIMEDOUT;
1375 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001376 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 } while (m == EMPTY_QUEUE);
1378
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001379 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 if(status == NULL) {
1381 adpt_send_nop(pHba, m);
1382 printk(KERN_ERR"IOP reset failed - no free memory.\n");
1383 return -ENOMEM;
1384 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001385 memset(status,0,4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
1387 msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1388 msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1389 msg[2]=0;
1390 msg[3]=0;
1391 msg[4]=0;
1392 msg[5]=0;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001393 msg[6]=dma_low(addr);
1394 msg[7]=dma_high(addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
1396 memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1397 wmb();
1398 writel(m, pHba->post_port);
1399 wmb();
1400
1401 while(*status == 0){
1402 if(time_after(jiffies,timeout)){
1403 printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001404 /* We lose 4 bytes of "status" here, but we cannot
1405 free these because controller may awake and corrupt
1406 those bytes at any time */
1407 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 return -ETIMEDOUT;
1409 }
1410 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001411 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 }
1413
1414 if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1415 PDEBUG("%s: Reset in progress...\n", pHba->name);
1416 // Here we wait for message frame to become available
1417 // indicated that reset has finished
1418 do {
1419 rmb();
1420 m = readl(pHba->post_port);
1421 if (m != EMPTY_QUEUE) {
1422 break;
1423 }
1424 if(time_after(jiffies,timeout)){
1425 printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001426 /* We lose 4 bytes of "status" here, but we
1427 cannot free these because controller may
1428 awake and corrupt those bytes at any time */
1429 /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 return -ETIMEDOUT;
1431 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08001432 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 } while (m == EMPTY_QUEUE);
1434 // Flush the offset
1435 adpt_send_nop(pHba, m);
1436 }
1437 adpt_i2o_status_get(pHba);
1438 if(*status == 0x02 ||
1439 pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1440 printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1441 pHba->name);
1442 } else {
1443 PDEBUG("%s: Reset completed.\n", pHba->name);
1444 }
1445
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001446 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447#ifdef UARTDELAY
1448 // This delay is to allow someone attached to the card through the debug UART to
1449 // set up the dump levels that they want before the rest of the initialization sequence
1450 adpt_delay(20000);
1451#endif
1452 return 0;
1453}
1454
1455
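/*
 * Walk the logical configuration table (LCT): SCSI, block and FC peripheral
 * entries are queried (DPT device-info group 0x8000) for their bus/target/lun
 * and linked into pHba->channel[bus].device[id]; bus-adapter-port entries
 * provide the per-channel TIDs.
 */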
1456static int adpt_i2o_parse_lct(adpt_hba* pHba)
1457{
1458 int i;
1459 int max;
1460 int tid;
1461 struct i2o_device *d;
1462 i2o_lct *lct = pHba->lct;
1463 u8 bus_no = 0;
1464 s16 scsi_id;
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02001465 u64 scsi_lun;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 u32 buf[10]; // larger than 7, or 8 ...
1467 struct adpt_device* pDev;
1468
1469 if (lct == NULL) {
1470 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1471 return -1;
1472 }
1473
1474 max = lct->table_size;
1475 max -= 3;
1476 max /= 9;
1477
1478 for(i=0;i<max;i++) {
1479 if( lct->lct_entry[i].user_tid != 0xfff){
1480 /*
1481 * If we have hidden devices, we need to inform the upper layers about
1482 * the possible maximum id reference to handle device access when
1483 * an array is disassembled. This code has no other purpose but to
1484 * allow us future access to devices that are currently hidden
1485 * behind arrays, hotspares or have not been configured (JBOD mode).
1486 */
1487 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1488 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1489 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1490 continue;
1491 }
1492 tid = lct->lct_entry[i].tid;
1493 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1494 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1495 continue;
1496 }
1497 bus_no = buf[0]>>16;
1498 scsi_id = buf[1];
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02001499 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1501 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1502 continue;
1503 }
1504 if (scsi_id >= MAX_ID){
1505 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1506 continue;
1507 }
1508 if(bus_no > pHba->top_scsi_channel){
1509 pHba->top_scsi_channel = bus_no;
1510 }
1511 if(scsi_id > pHba->top_scsi_id){
1512 pHba->top_scsi_id = scsi_id;
1513 }
1514 if(scsi_lun > pHba->top_scsi_lun){
1515 pHba->top_scsi_lun = scsi_lun;
1516 }
1517 continue;
1518 }
Robert P. J. Day5cbded52006-12-13 00:35:56 -08001519 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 if(d==NULL)
1521 {
1522 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1523 return -ENOMEM;
1524 }
1525
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07001526 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 d->next = NULL;
1528
1529 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1530
1531 d->flags = 0;
1532 tid = d->lct_data.tid;
1533 adpt_i2o_report_hba_unit(pHba, d);
1534 adpt_i2o_install_device(pHba, d);
1535 }
1536 bus_no = 0;
1537 for(d = pHba->devices; d ; d = d->next) {
1538 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1539 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1540 tid = d->lct_data.tid;
1541 // TODO get the bus_no from hrt-but for now they are in order
1542 //bus_no =
1543 if(bus_no > pHba->top_scsi_channel){
1544 pHba->top_scsi_channel = bus_no;
1545 }
1546 pHba->channel[bus_no].type = d->lct_data.class_id;
1547 pHba->channel[bus_no].tid = tid;
1548 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1549 {
1550 pHba->channel[bus_no].scsi_id = buf[1];
1551 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1552 }
1553 // TODO remove - this is just until we get from hrt
1554 bus_no++;
1555 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1556 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1557 break;
1558 }
1559 }
1560 }
1561
1562 // Setup adpt_device table
1563 for(d = pHba->devices; d ; d = d->next) {
1564 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1565 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1566 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1567
1568 tid = d->lct_data.tid;
1569 scsi_id = -1;
1570 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1571 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1572 bus_no = buf[0]>>16;
1573 scsi_id = buf[1];
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02001574 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1576 continue;
1577 }
1578 if (scsi_id >= MAX_ID) {
1579 continue;
1580 }
1581 if( pHba->channel[bus_no].device[scsi_id] == NULL){
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301582 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 if(pDev == NULL) {
1584 return -ENOMEM;
1585 }
1586 pHba->channel[bus_no].device[scsi_id] = pDev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 } else {
1588 for( pDev = pHba->channel[bus_no].device[scsi_id];
1589 pDev->next_lun; pDev = pDev->next_lun){
1590 }
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301591 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if(pDev->next_lun == NULL) {
1593 return -ENOMEM;
1594 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 pDev = pDev->next_lun;
1596 }
1597 pDev->tid = tid;
1598 pDev->scsi_channel = bus_no;
1599 pDev->scsi_id = scsi_id;
1600 pDev->scsi_lun = scsi_lun;
1601 pDev->pI2o_dev = d;
1602 d->owner = pDev;
1603 pDev->type = (buf[0])&0xff;
1604 pDev->flags = (buf[0]>>8)&0xff;
1605 if(scsi_id > pHba->top_scsi_id){
1606 pHba->top_scsi_id = scsi_id;
1607 }
1608 if(scsi_lun > pHba->top_scsi_lun){
1609 pHba->top_scsi_lun = scsi_lun;
1610 }
1611 }
1612 if(scsi_id == -1){
1613 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1614 d->lct_data.identity_tag);
1615 }
1616 }
1617 }
1618 return 0;
1619}
1620
1621
1622/*
1623 * Each I2O controller has a chain of devices on it - these match
1624 * the useful parts of the LCT of the board.
1625 */
1626
1627static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1628{
Arjan van de Ven0b950672006-01-11 13:16:10 +01001629 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 d->controller=pHba;
1631 d->owner=NULL;
1632 d->next=pHba->devices;
1633 d->prev=NULL;
1634 if (pHba->devices != NULL){
1635 pHba->devices->prev=d;
1636 }
1637 pHba->devices=d;
1638 *d->dev_name = 0;
1639
Arjan van de Ven0b950672006-01-11 13:16:10 +01001640 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 return 0;
1642}
1643
1644static int adpt_open(struct inode *inode, struct file *file)
1645{
1646 int minor;
1647 adpt_hba* pHba;
1648
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02001649 mutex_lock(&adpt_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 //TODO check for root access
1651 //
1652 minor = iminor(inode);
1653 if (minor >= hba_count) {
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02001654 mutex_unlock(&adpt_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 return -ENXIO;
1656 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001657 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1659 if (pHba->unit == minor) {
1660 break; /* found adapter */
1661 }
1662 }
1663 if (pHba == NULL) {
Arjan van de Ven0b950672006-01-11 13:16:10 +01001664 mutex_unlock(&adpt_configuration_lock);
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02001665 mutex_unlock(&adpt_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 return -ENXIO;
1667 }
1668
1669// if(pHba->in_use){
Arjan van de Ven0b950672006-01-11 13:16:10 +01001670 // mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671// return -EBUSY;
1672// }
1673
1674 pHba->in_use = 1;
Arjan van de Ven0b950672006-01-11 13:16:10 +01001675 mutex_unlock(&adpt_configuration_lock);
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02001676 mutex_unlock(&adpt_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 return 0;
1679}
1680
1681static int adpt_close(struct inode *inode, struct file *file)
1682{
1683 int minor;
1684 adpt_hba* pHba;
1685
1686 minor = iminor(inode);
1687 if (minor >= hba_count) {
1688 return -ENXIO;
1689 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001690 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1692 if (pHba->unit == minor) {
1693 break; /* found adapter */
1694 }
1695 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01001696 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 if (pHba == NULL) {
1698 return -ENXIO;
1699 }
1700
1701 pHba->in_use = 0;
1702
1703 return 0;
1704}
1705
1706
1707static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1708{
1709 u32 msg[MAX_MESSAGE_SIZE];
1710 u32* reply = NULL;
1711 u32 size = 0;
1712 u32 reply_size = 0;
1713 u32 __user *user_msg = arg;
1714 u32 __user * user_reply = NULL;
1715 void *sg_list[pHba->sg_tablesize];
1716 u32 sg_offset = 0;
1717 u32 sg_count = 0;
1718 int sg_index = 0;
1719 u32 i = 0;
1720 u32 rcode = 0;
1721 void *p = NULL;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001722 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 ulong flags = 0;
1724
1725 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1726 // get user msg size in u32s
1727 if(get_user(size, &user_msg[0])){
1728 return -EFAULT;
1729 }
1730 size = size>>16;
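	/*
	 * Word 0 of the user's I2O frame carries the frame size (in u32s)
	 * in its upper 16 bits; bits 7:4 hold the SGL offset, pulled out
	 * below as sg_offset.
	 */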
1731
1732 user_reply = &user_msg[size];
1733 if(size > MAX_MESSAGE_SIZE){
1734 return -EFAULT;
1735 }
1736 size *= 4; // Convert to bytes
1737
1738 /* Copy in the user's I2O command */
1739 if(copy_from_user(msg, user_msg, size)) {
1740 return -EFAULT;
1741 }
1742 get_user(reply_size, &user_reply[0]);
1743 reply_size = reply_size>>16;
1744 if(reply_size > REPLY_FRAME_SIZE){
1745 reply_size = REPLY_FRAME_SIZE;
1746 }
1747 reply_size *= 4;
vignesh.babu@wipro.comab552202007-04-16 11:35:38 +05301748 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 if(reply == NULL) {
1750 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1751 return -ENOMEM;
1752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 sg_offset = (msg[0]>>4)&0xf;
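	/*
	 * Replace the caller's contexts with our own: bit 30 of the
	 * initiator context marks this as an ioctl reply, and the
	 * transaction context lets adpt_ioctl_from_context() in the ISR
	 * recover the local reply buffer.
	 */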
1754 msg[2] = 0x40000000; // IOCTL context
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001755 msg[3] = adpt_ioctl_to_context(pHba, reply);
 1756	if (msg[3] == (u32)-1) {
 1757		kfree(reply);	/* don't leak the reply buffer */
		return -EBUSY;
	}
1758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1760 if(sg_offset) {
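		/*
		 * The caller's SG elements point at user-space buffers the
		 * controller cannot reach, so each one is bounced through a
		 * coherent kernel buffer: copy the data in for outbound
		 * transfers, rewrite addr_bus to the DMA address, and copy
		 * results back out after the command completes.
		 */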
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001761 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1763 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1764 if (sg_count > pHba->sg_tablesize){
1765 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1766 kfree (reply);
1767 return -EINVAL;
1768 }
1769
1770 for(i = 0; i < sg_count; i++) {
1771 int sg_size;
1772
1773 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1774 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1775 rcode = -EINVAL;
1776 goto cleanup;
1777 }
1778 sg_size = sg[i].flag_count & 0xffffff;
1779 /* Allocate memory for the transfer */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001780 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 if(!p) {
1782 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1783 pHba->name,sg_size,i,sg_count);
1784 rcode = -ENOMEM;
1785 goto cleanup;
1786 }
1787 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1788 /* Copy in the user's SG buffer if necessary */
1789 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001790 // sg_simple_element API is 32 bit
1791 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1793 rcode = -EFAULT;
1794 goto cleanup;
1795 }
1796 }
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001797 /* sg_simple_element API is 32 bit, but addr < 4GB */
1798 sg[i].addr_bus = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 }
1800 }
1801
1802 do {
Hannes Reinecke1f8c88c2013-10-23 10:51:15 +02001803 /*
 1804		 * Stop any new commands from entering the
1805 * controller while processing the ioctl
1806 */
1807 if (pHba->host) {
1808 scsi_block_requests(pHba->host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 spin_lock_irqsave(pHba->host->host_lock, flags);
Hannes Reinecke1f8c88c2013-10-23 10:51:15 +02001810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1812 if (rcode != 0)
1813 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1814 rcode, reply);
Hannes Reinecke1f8c88c2013-10-23 10:51:15 +02001815 if (pHba->host) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 spin_unlock_irqrestore(pHba->host->host_lock, flags);
Hannes Reinecke1f8c88c2013-10-23 10:51:15 +02001817 scsi_unblock_requests(pHba->host);
1818 }
1819 } while (rcode == -ETIMEDOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 if(rcode){
1822 goto cleanup;
1823 }
1824
1825 if(sg_offset) {
 1826		/* Copy the scatter/gather buffers back to user space */
1827 u32 j;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001828 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 struct sg_simple_element* sg;
1830 int sg_size;
1831
 1832		// re-read the original user message so the copy-out below uses the caller's SG addresses
1833 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1834 // get user msg size in u32s
1835 if(get_user(size, &user_msg[0])){
1836 rcode = -EFAULT;
1837 goto cleanup;
1838 }
1839 size = size>>16;
1840 size *= 4;
Alan Coxef7562b2009-10-27 15:35:35 +00001841 if (size > MAX_MESSAGE_SIZE) {
OGAWA Hirofumiaefba412009-10-30 17:02:31 +09001842 rcode = -EINVAL;
Alan Coxef7562b2009-10-27 15:35:35 +00001843 goto cleanup;
1844 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 /* Copy in the user's I2O command */
1846 if (copy_from_user (msg, user_msg, size)) {
1847 rcode = -EFAULT;
1848 goto cleanup;
1849 }
1850 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1851
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001852 // TODO add 64 bit API
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 sg = (struct sg_simple_element*)(msg + sg_offset);
1854 for (j = 0; j < sg_count; j++) {
1855 /* Copy out the SG list to user's buffer if necessary */
1856 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1857 sg_size = sg[j].flag_count & 0xffffff;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02001858 // sg_simple_element API is 32 bit
1859 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1861 rcode = -EFAULT;
1862 goto cleanup;
1863 }
1864 }
1865 }
1866 }
1867
1868 /* Copy back the reply to user space */
1869 if (reply_size) {
1870 // we wrote our own values for context - now restore the user supplied ones
1871 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1872 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1873 rcode = -EFAULT;
1874 }
1875 if(copy_to_user(user_reply, reply, reply_size)) {
1876 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1877 rcode = -EFAULT;
1878 }
1879 }
1880
1881
1882cleanup:
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001883 if (rcode != -ETIME && rcode != -EINTR) {
1884 struct sg_simple_element *sg =
1885 (struct sg_simple_element*) (msg +sg_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 kfree (reply);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02001887 while(sg_index) {
1888 if(sg_list[--sg_index]) {
1889 dma_free_coherent(&pHba->pDev->dev,
1890 sg[sg_index].flag_count & 0xffffff,
1891 sg_list[sg_index],
1892 sg[sg_index].addr_bus);
1893 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 }
1895 }
1896 return rcode;
1897}
1898
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899#if defined __ia64__
1900static void adpt_ia64_info(sysInfo_S* si)
1901{
1902 // This is all the info we need for now
1903 // We will add more info as our new
 1904	// management utility requires it
1905 si->processorType = PROC_IA64;
1906}
1907#endif
1908
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909#if defined __sparc__
1910static void adpt_sparc_info(sysInfo_S* si)
1911{
1912 // This is all the info we need for now
1913 // We will add more info as our new
 1914	// management utility requires it
1915 si->processorType = PROC_ULTRASPARC;
1916}
1917#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918#if defined __alpha__
1919static void adpt_alpha_info(sysInfo_S* si)
1920{
1921 // This is all the info we need for now
1922 // We will add more info as our new
 1923	// management utility requires it
1924 si->processorType = PROC_ALPHA;
1925}
1926#endif
1927
1928#if defined __i386__
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929static void adpt_i386_info(sysInfo_S* si)
1930{
1931 // This is all the info we need for now
1932 // We will add more info as our new
 1933	// management utility requires it
1934 switch (boot_cpu_data.x86) {
1935 case CPU_386:
1936 si->processorType = PROC_386;
1937 break;
1938 case CPU_486:
1939 si->processorType = PROC_486;
1940 break;
1941 case CPU_586:
1942 si->processorType = PROC_PENTIUM;
1943 break;
1944 default: // Just in case
1945 si->processorType = PROC_PENTIUM;
1946 break;
1947 }
1948}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949#endif
1950
Andrew Morton8b2cc912008-05-06 20:42:42 -07001951/*
 1952 * This routine returns information about the system. It does not affect
 1953 * any driver logic; if the info is wrong, it doesn't matter.
1954 */
1955
1956/* Get all the info we can not get from kernel services */
1957static int adpt_system_info(void __user *buffer)
1958{
1959 sysInfo_S si;
1960
1961 memset(&si, 0, sizeof(si));
1962
1963 si.osType = OS_LINUX;
1964 si.osMajorVersion = 0;
1965 si.osMinorVersion = 0;
1966 si.osRevision = 0;
1967 si.busType = SI_PCI_BUS;
1968 si.processorFamily = DPTI_sig.dsProcessorFamily;
1969
1970#if defined __i386__
1971 adpt_i386_info(&si);
1972#elif defined (__ia64__)
1973 adpt_ia64_info(&si);
1974#elif defined(__sparc__)
1975 adpt_sparc_info(&si);
1976#elif defined (__alpha__)
1977 adpt_alpha_info(&si);
1978#else
1979 si.processorType = 0xff ;
1980#endif
1981 if (copy_to_user(buffer, &si, sizeof(si))){
1982 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1983 return -EFAULT;
1984 }
1985
1986 return 0;
1987}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Arnd Bergmannf4927c42010-04-27 00:24:01 +02001989static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990{
1991 int minor;
1992 int error = 0;
1993 adpt_hba* pHba;
1994 ulong flags = 0;
1995 void __user *argp = (void __user *)arg;
1996
1997 minor = iminor(inode);
1998 if (minor >= DPTI_MAX_HBA){
1999 return -ENXIO;
2000 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002001 mutex_lock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2003 if (pHba->unit == minor) {
2004 break; /* found adapter */
2005 }
2006 }
Arjan van de Ven0b950672006-01-11 13:16:10 +01002007 mutex_unlock(&adpt_configuration_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 if(pHba == NULL){
2009 return -ENXIO;
2010 }
2011
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002012 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2013 schedule_timeout_uninterruptible(2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
2015 switch (cmd) {
2016 // TODO: handle 3 cases
2017 case DPT_SIGNATURE:
2018 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2019 return -EFAULT;
2020 }
2021 break;
2022 case I2OUSRCMD:
2023 return adpt_i2o_passthru(pHba, argp);
2024
2025 case DPT_CTRLINFO:{
2026 drvrHBAinfo_S HbaInfo;
2027
2028#define FLG_OSD_PCI_VALID 0x0001
2029#define FLG_OSD_DMA 0x0002
2030#define FLG_OSD_I2O 0x0004
2031 memset(&HbaInfo, 0, sizeof(HbaInfo));
2032 HbaInfo.drvrHBAnum = pHba->unit;
2033 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2034 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2035 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2036 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2037 HbaInfo.Interrupt = pHba->pDev->irq;
2038 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2039 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2040 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2041 return -EFAULT;
2042 }
2043 break;
2044 }
2045 case DPT_SYSINFO:
2046 return adpt_system_info(argp);
2047 case DPT_BLINKLED:{
2048 u32 value;
2049 value = (u32)adpt_read_blink_led(pHba);
2050 if (copy_to_user(argp, &value, sizeof(value))) {
2051 return -EFAULT;
2052 }
2053 break;
2054 }
2055 case I2ORESETCMD:
2056 if(pHba->host)
2057 spin_lock_irqsave(pHba->host->host_lock, flags);
2058 adpt_hba_reset(pHba);
2059 if(pHba->host)
2060 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2061 break;
2062 case I2ORESCANCMD:
2063 adpt_rescan(pHba);
2064 break;
2065 default:
2066 return -EINVAL;
2067 }
2068
2069 return error;
2070}
2071
Arnd Bergmannf4927c42010-04-27 00:24:01 +02002072static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2073{
2074 struct inode *inode;
2075 long ret;
2076
Al Viro496ad9a2013-01-23 17:07:38 -05002077 inode = file_inode(file);
Arnd Bergmannf4927c42010-04-27 00:24:01 +02002078
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02002079 mutex_lock(&adpt_mutex);
Arnd Bergmannf4927c42010-04-27 00:24:01 +02002080 ret = adpt_ioctl(inode, file, cmd, arg);
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02002081 mutex_unlock(&adpt_mutex);
Arnd Bergmannf4927c42010-04-27 00:24:01 +02002082
2083 return ret;
2084}
2085
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002086#ifdef CONFIG_COMPAT
2087static long compat_adpt_ioctl(struct file *file,
2088 unsigned int cmd, unsigned long arg)
2089{
2090 struct inode *inode;
2091 long ret;
2092
Al Viro496ad9a2013-01-23 17:07:38 -05002093 inode = file_inode(file);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002094
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02002095 mutex_lock(&adpt_mutex);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002096
2097 switch(cmd) {
2098 case DPT_SIGNATURE:
2099 case I2OUSRCMD:
2100 case DPT_CTRLINFO:
2101 case DPT_SYSINFO:
2102 case DPT_BLINKLED:
2103 case I2ORESETCMD:
2104 case I2ORESCANCMD:
2105 case (DPT_TARGET_BUSY & 0xFFFF):
2106 case DPT_TARGET_BUSY:
2107 ret = adpt_ioctl(inode, file, cmd, arg);
2108 break;
2109 default:
2110 ret = -ENOIOCTLCMD;
2111 }
2112
Arnd Bergmannc45d15d2010-06-02 14:28:52 +02002113 mutex_unlock(&adpt_mutex);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002114
2115 return ret;
2116}
2117#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
David Howells7d12e782006-10-05 14:55:46 +01002119static irqreturn_t adpt_isr(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120{
2121 struct scsi_cmnd* cmd;
2122 adpt_hba* pHba = dev_id;
2123 u32 m;
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002124 void __iomem *reply;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 u32 status=0;
2126 u32 context;
2127 ulong flags = 0;
2128 int handled = 0;
2129
2130 if (pHba == NULL){
2131 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2132 return IRQ_NONE;
2133 }
2134 if(pHba->host)
2135 spin_lock_irqsave(pHba->host->host_lock, flags);
2136
2137 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2138 m = readl(pHba->reply_port);
2139 if(m == EMPTY_QUEUE){
2140 // Try twice then give up
2141 rmb();
2142 m = readl(pHba->reply_port);
2143 if(m == EMPTY_QUEUE){
2144 // This really should not happen
2145 printk(KERN_ERR"dpti: Could not get reply frame\n");
2146 goto out;
2147 }
2148 }
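		/*
		 * m is the bus address of the reply frame; frames normally
		 * come from our coherent reply pool, so the offset from
		 * reply_pool_pa gives the kernel-virtual frame address.
		 */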
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002149 if (pHba->reply_pool_pa <= m &&
2150 m < pHba->reply_pool_pa +
2151 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2152 reply = (u8 *)pHba->reply_pool +
2153 (m - pHba->reply_pool_pa);
2154 } else {
2155 /* Ick, we should *never* be here */
2156 printk(KERN_ERR "dpti: reply frame not from pool\n");
2157 reply = (u8 *)bus_to_virt(m);
2158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
2160 if (readl(reply) & MSG_FAIL) {
2161 u32 old_m = readl(reply+28);
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002162 void __iomem *msg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 u32 old_context;
2164 PDEBUG("%s: Failed message\n",pHba->name);
2165 if(old_m >= 0x100000){
2166 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2167 writel(m,pHba->reply_port);
2168 continue;
2169 }
2170 // Transaction context is 0 in failed reply frame
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002171 msg = pHba->msg_addr_virt + old_m;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 old_context = readl(msg+12);
2173 writel(old_context, reply+12);
2174 adpt_send_nop(pHba, old_m);
2175 }
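		/*
		 * The initiator context encodes the reply type: bit 30 marks
		 * an ioctl passthru reply, bit 31 a post-wait message; plain
		 * SCSI completions have neither and carry the command context
		 * in reply+12.
		 */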
2176 context = readl(reply+8);
2177 if(context & 0x40000000){ // IOCTL
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002178 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002179 if( p != NULL) {
2180 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 }
2182 // All IOCTLs will also be post wait
2183 }
2184 if(context & 0x80000000){ // Post wait message
2185 status = readl(reply+16);
2186 if(status >> 24){
2187 status &= 0xffff; /* Get detail status */
2188 } else {
2189 status = I2O_POST_WAIT_OK;
2190 }
2191 if(!(context & 0x40000000)) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002192 cmd = adpt_cmd_from_context(pHba,
2193 readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 if(cmd != NULL) {
2195 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2196 }
2197 }
2198 adpt_i2o_post_wait_complete(context, status);
2199 } else { // SCSI message
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002200 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 if(cmd != NULL){
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002202 scsi_dma_unmap(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 if(cmd->serial_number != 0) { // If not timedout
2204 adpt_i2o_to_scsi(reply, cmd);
2205 }
2206 }
2207 }
2208 writel(m, pHba->reply_port);
2209 wmb();
2210 rmb();
2211 }
2212 handled = 1;
2213out: if(pHba->host)
2214 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2215 return IRQ_RETVAL(handled);
2216}
2217
2218static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2219{
2220 int i;
2221 u32 msg[MAX_MESSAGE_SIZE];
2222 u32* mptr;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002223 u32* lptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 u32 *lenptr;
2225 int direction;
2226 int scsidir;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002227 int nseg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 u32 len;
2229 u32 reqlen;
2230 s32 rcode;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002231 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
2233 memset(msg, 0 , sizeof(msg));
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002234 len = scsi_bufflen(cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 direction = 0x00000000;
2236
2237 scsidir = 0x00000000; // DATA NO XFER
2238 if(len) {
2239 /*
2240 * Set SCBFlags to indicate if data is being transferred
2241 * in or out, or no data transfer
2242 * Note: Do not have to verify index is less than 0 since
2243 * cmd->cmnd[0] is an unsigned char
2244 */
2245 switch(cmd->sc_data_direction){
2246 case DMA_FROM_DEVICE:
2247 scsidir =0x40000000; // DATA IN (iop<--dev)
2248 break;
2249 case DMA_TO_DEVICE:
2250 direction=0x04000000; // SGL OUT
2251 scsidir =0x80000000; // DATA OUT (iop-->dev)
2252 break;
2253 case DMA_NONE:
2254 break;
2255 case DMA_BIDIRECTIONAL:
2256 scsidir =0x40000000; // DATA IN (iop<--dev)
2257 // Assume In - and continue;
2258 break;
2259 default:
 2260			printk(KERN_WARNING"%s: unknown data direction for scsi opcode 0x%x.\n",
2261 pHba->name, cmd->cmnd[0]);
2262 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2263 cmd->scsi_done(cmd);
2264 return 0;
2265 }
2266 }
2267 // msg[0] is set later
2268 // I2O_CMD_SCSI_EXEC
2269 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2270 msg[2] = 0;
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002271 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 // Our cards use the transaction context as the tag for queueing
2273 // Adaptec/DPT Private stuff
2274 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2275 msg[5] = d->tid;
2276 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2277 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2278 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2279 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2280 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2281
2282 mptr=msg+7;
2283
2284 // Write SCSI command into the message - always 16 byte block
2285 memset(mptr, 0, 16);
2286 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2287 mptr+=4;
2288 lenptr=mptr++; /* Remember me - fill in when we know */
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002289 if (dpt_dma64(pHba)) {
2290 reqlen = 16; // SINGLE SGE
2291 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2292 *mptr++ = 1 << PAGE_SHIFT;
2293 } else {
2294 reqlen = 14; // SINGLE SGE
2295 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 /* Now fill in the SGList and command */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
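	/*
	 * Simple SG elements below are built as flags | length, with the
	 * address word(s) following: 0x10000000 marks a simple element,
	 * 0x04000000 the data-out direction, and the final element is
	 * rewritten with 0xD0000000 (last element, end of buffer/list).
	 */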
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002298 nseg = scsi_dma_map(cmd);
2299 BUG_ON(nseg < 0);
2300 if (nseg) {
2301 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
2303 len = 0;
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002304 scsi_for_each_sg(cmd, sg, nseg, i) {
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002305 lptr = mptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2307 len+=sg_dma_len(sg);
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002308 addr = sg_dma_address(sg);
2309 *mptr++ = dma_low(addr);
2310 if (dpt_dma64(pHba))
2311 *mptr++ = dma_high(addr);
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002312 /* Make this an end of list */
2313 if (i == nseg - 1)
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002314 *lptr = direction|0xD0000000|sg_dma_len(sg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 reqlen = mptr - msg;
2317 *lenptr = len;
2318
2319 if(cmd->underflow && len != cmd->underflow){
2320 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2321 len, cmd->underflow);
2322 }
2323 } else {
FUJITA Tomonori10803de2007-05-26 02:08:10 +09002324 *lenptr = len = 0;
2325 reqlen = 12;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 }
2327
2328 /* Stick the headers on */
2329 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2330
 2331	// Send it on its way
2332 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2333 if (rcode == 0) {
2334 return 0;
2335 }
2336 return rcode;
2337}
2338
2339
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002340static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
Andrew Morton24601bb2007-12-10 15:49:20 -08002341{
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002342 struct Scsi_Host *host;
Andrew Morton24601bb2007-12-10 15:49:20 -08002343
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002344 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
Andrew Morton24601bb2007-12-10 15:49:20 -08002345 if (host == NULL) {
Miquel van Smoorenburgc864cb12008-05-02 01:05:33 +02002346 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
Andrew Morton24601bb2007-12-10 15:49:20 -08002347 return -1;
2348 }
2349 host->hostdata[0] = (unsigned long)pHba;
2350 pHba->host = host;
2351
2352 host->irq = pHba->pDev->irq;
2353 /* no IO ports, so don't have to set host->io_port and
2354 * host->n_io_port
2355 */
2356 host->io_port = 0;
2357 host->n_io_port = 0;
2358 /* see comments in scsi_host.h */
2359 host->max_id = 16;
2360 host->max_lun = 256;
2361 host->max_channel = pHba->top_scsi_channel + 1;
2362 host->cmd_per_lun = 1;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002363 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
Andrew Morton24601bb2007-12-10 15:49:20 -08002364 host->sg_tablesize = pHba->sg_tablesize;
2365 host->can_queue = pHba->post_fifo_size;
2366
2367 return 0;
2368}
2369
2370
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002371static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372{
2373 adpt_hba* pHba;
2374 u32 hba_status;
2375 u32 dev_status;
2376 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
 2377	// Reading bytes would look cleaner, but the rest of the I/O in this
 2378	// driver is done in 4-byte words, so keep that model here as well
2380 u16 detailed_status = readl(reply+16) &0xffff;
2381 dev_status = (detailed_status & 0xff);
2382 hba_status = detailed_status >> 8;
2383
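	/*
	 * reply+20 carries the byte count actually transferred by the IOP,
	 * so the residual is the request length minus that value.
	 */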
2384 // calculate resid for sg
Miquel van Smoorenburgdf81d232008-11-05 00:09:12 +01002385 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
2387 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2388
2389 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2390
2391 if(!(reply_flags & MSG_FAIL)) {
2392 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2393 case I2O_SCSI_DSC_SUCCESS:
2394 cmd->result = (DID_OK << 16);
2395 // handle underflow
Miquel van Smoorenburgdf81d232008-11-05 00:09:12 +01002396 if (readl(reply+20) < cmd->underflow) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 cmd->result = (DID_ERROR <<16);
2398 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2399 }
2400 break;
2401 case I2O_SCSI_DSC_REQUEST_ABORTED:
2402 cmd->result = (DID_ABORT << 16);
2403 break;
2404 case I2O_SCSI_DSC_PATH_INVALID:
2405 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2406 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2407 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2408 case I2O_SCSI_DSC_NO_ADAPTER:
2409 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002410 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2411 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 cmd->result = (DID_TIME_OUT << 16);
2413 break;
2414 case I2O_SCSI_DSC_ADAPTER_BUSY:
2415 case I2O_SCSI_DSC_BUS_BUSY:
2416 cmd->result = (DID_BUS_BUSY << 16);
2417 break;
2418 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2419 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2420 cmd->result = (DID_RESET << 16);
2421 break;
2422 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2423 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2424 cmd->result = (DID_PARITY << 16);
2425 break;
2426 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2427 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2428 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2429 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2430 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2431 case I2O_SCSI_DSC_DATA_OVERRUN:
2432 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2433 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2434 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2435 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2436 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2437 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2438 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2439 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2440 case I2O_SCSI_DSC_INVALID_CDB:
2441 case I2O_SCSI_DSC_LUN_INVALID:
2442 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2443 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2444 case I2O_SCSI_DSC_NO_NEXUS:
2445 case I2O_SCSI_DSC_CDB_RECEIVED:
2446 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2447 case I2O_SCSI_DSC_QUEUE_FROZEN:
2448 case I2O_SCSI_DSC_REQUEST_INVALID:
2449 default:
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002450 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2451 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 hba_status, dev_status, cmd->cmnd[0]);
2453 cmd->result = (DID_ERROR << 16);
2454 break;
2455 }
2456
2457 // copy over the request sense data if it was a check
2458 // condition status
Salyzyn, Markd814c512008-01-14 11:04:40 -08002459 if (dev_status == SAM_STAT_CHECK_CONDITION) {
FUJITA Tomonorib80ca4f2008-01-13 15:46:13 +09002460 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 // Copy over the sense data
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002462 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2464 cmd->sense_buffer[2] == DATA_PROTECT ){
2465 /* This is to handle an array failed */
2466 cmd->result = (DID_TIME_OUT << 16);
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002467 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2468 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 hba_status, dev_status, cmd->cmnd[0]);
2470
2471 }
2472 }
2473 } else {
 2474		/* In this condition we could not talk to the tid -
 2475		 * the card rejected the command. Signal a retry
 2476		 * for a limited number of attempts.
2477 */
2478 cmd->result = (DID_TIME_OUT << 16);
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002479 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2480 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2482 }
2483
2484 cmd->result |= (dev_status);
2485
2486 if(cmd->scsi_done != NULL){
2487 cmd->scsi_done(cmd);
2488 }
2489 return cmd->result;
2490}
2491
2492
2493static s32 adpt_rescan(adpt_hba* pHba)
2494{
2495 s32 rcode;
2496 ulong flags = 0;
2497
2498 if(pHba->host)
2499 spin_lock_irqsave(pHba->host->host_lock, flags);
2500 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2501 goto out;
2502 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2503 goto out;
2504 rcode = 0;
2505out: if(pHba->host)
2506 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2507 return rcode;
2508}
2509
2510
2511static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2512{
2513 int i;
2514 int max;
2515 int tid;
2516 struct i2o_device *d;
2517 i2o_lct *lct = pHba->lct;
2518 u8 bus_no = 0;
2519 s16 scsi_id;
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002520 u64 scsi_lun;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 u32 buf[10]; // at least 8 u32's
2522 struct adpt_device* pDev = NULL;
2523 struct i2o_device* pI2o_dev = NULL;
2524
2525 if (lct == NULL) {
2526 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2527 return -1;
2528 }
2529
2530 max = lct->table_size;
2531 max -= 3;
2532 max /= 9;
2533
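	/*
	 * Rescan strategy: mark every known device unscanned, walk the new
	 * LCT adding devices we have not seen and re-onlining ones we have,
	 * then take offline whatever is still marked unscanned at the end.
	 */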
2534 // Mark each drive as unscanned
2535 for (d = pHba->devices; d; d = d->next) {
2536 pDev =(struct adpt_device*) d->owner;
2537 if(!pDev){
2538 continue;
2539 }
2540 pDev->state |= DPTI_DEV_UNSCANNED;
2541 }
2542
2543 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2544
2545 for(i=0;i<max;i++) {
2546 if( lct->lct_entry[i].user_tid != 0xfff){
2547 continue;
2548 }
2549
2550 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2551 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2552 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2553 tid = lct->lct_entry[i].tid;
2554 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2555 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2556 continue;
2557 }
2558 bus_no = buf[0]>>16;
Dan Carpentere84d96d2010-07-15 10:20:19 +02002559 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2560 printk(KERN_WARNING
2561 "%s: Channel number %d out of range\n",
2562 pHba->name, bus_no);
2563 continue;
2564 }
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 scsi_id = buf[1];
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002567 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 pDev = pHba->channel[bus_no].device[scsi_id];
2569 /* da lun */
2570 while(pDev) {
2571 if(pDev->scsi_lun == scsi_lun) {
2572 break;
2573 }
2574 pDev = pDev->next_lun;
2575 }
2576 if(!pDev ) { // Something new add it
Julia Lawallda2907f2010-05-30 15:49:22 +02002577 d = kmalloc(sizeof(struct i2o_device),
2578 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 if(d==NULL)
2580 {
2581 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2582 return -ENOMEM;
2583 }
2584
Benoit Boissinot 1c2fb3f2005-04-25 19:46:48 -07002585 d->controller = pHba;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 d->next = NULL;
2587
2588 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2589
2590 d->flags = 0;
2591 adpt_i2o_report_hba_unit(pHba, d);
2592 adpt_i2o_install_device(pHba, d);
2593
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 pDev = pHba->channel[bus_no].device[scsi_id];
2595 if( pDev == NULL){
Julia Lawallda2907f2010-05-30 15:49:22 +02002596 pDev =
2597 kzalloc(sizeof(struct adpt_device),
2598 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 if(pDev == NULL) {
2600 return -ENOMEM;
2601 }
2602 pHba->channel[bus_no].device[scsi_id] = pDev;
2603 } else {
2604 while (pDev->next_lun) {
2605 pDev = pDev->next_lun;
2606 }
Julia Lawallda2907f2010-05-30 15:49:22 +02002607 pDev = pDev->next_lun =
2608 kzalloc(sizeof(struct adpt_device),
2609 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 if(pDev == NULL) {
2611 return -ENOMEM;
2612 }
2613 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 pDev->tid = d->lct_data.tid;
2615 pDev->scsi_channel = bus_no;
2616 pDev->scsi_id = scsi_id;
2617 pDev->scsi_lun = scsi_lun;
2618 pDev->pI2o_dev = d;
2619 d->owner = pDev;
2620 pDev->type = (buf[0])&0xff;
2621 pDev->flags = (buf[0]>>8)&0xff;
 2622				// Too late, SCSI system has made up its mind, but what the hey ...
2623 if(scsi_id > pHba->top_scsi_id){
2624 pHba->top_scsi_id = scsi_id;
2625 }
2626 if(scsi_lun > pHba->top_scsi_lun){
2627 pHba->top_scsi_lun = scsi_lun;
2628 }
2629 continue;
2630 } // end of new i2o device
2631
2632 // We found an old device - check it
2633 while(pDev) {
2634 if(pDev->scsi_lun == scsi_lun) {
2635 if(!scsi_device_online(pDev->pScsi_dev)) {
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002636 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 pHba->name,bus_no,scsi_id,scsi_lun);
2638 if (pDev->pScsi_dev) {
2639 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2640 }
2641 }
2642 d = pDev->pI2o_dev;
2643 if(d->lct_data.tid != tid) { // something changed
2644 pDev->tid = tid;
2645 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2646 if (pDev->pScsi_dev) {
2647 pDev->pScsi_dev->changed = TRUE;
2648 pDev->pScsi_dev->removable = TRUE;
2649 }
2650 }
2651 // Found it - mark it scanned
2652 pDev->state = DPTI_DEV_ONLINE;
2653 break;
2654 }
2655 pDev = pDev->next_lun;
2656 }
2657 }
2658 }
2659 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2660 pDev =(struct adpt_device*) pI2o_dev->owner;
2661 if(!pDev){
2662 continue;
2663 }
 2664		// Take offline any drives that previously existed but could not be
 2665		// found in the LCT table
2666 if (pDev->state & DPTI_DEV_UNSCANNED){
2667 pDev->state = DPTI_DEV_OFFLINE;
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002668 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 if (pDev->pScsi_dev) {
2670 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2671 }
2672 }
2673 }
2674 return 0;
2675}
2676
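/*
 * Complete every command still outstanding on this host's devices with a
 * QUEUE_FULL status so the midlayer will retry them later.
 */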
2677static void adpt_fail_posted_scbs(adpt_hba* pHba)
2678{
2679 struct scsi_cmnd* cmd = NULL;
2680 struct scsi_device* d = NULL;
2681
2682 shost_for_each_device(d, pHba->host) {
2683 unsigned long flags;
2684 spin_lock_irqsave(&d->list_lock, flags);
2685 list_for_each_entry(cmd, &d->cmd_list, list) {
2686 if(cmd->serial_number == 0){
2687 continue;
2688 }
2689 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2690 cmd->scsi_done(cmd);
2691 }
2692 spin_unlock_irqrestore(&d->list_lock, flags);
2693 }
2694}
2695
2696
2697/*============================================================================
2698 * Routines from i2o subsystem
2699 *============================================================================
2700 */
2701
2702
2703
2704/*
2705 * Bring an I2O controller into HOLD state. See the spec.
2706 */
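/*
 * Sequence used here: read the status block, reset the IOP if it is
 * faulted or already past the RESET state, initialize the outbound reply
 * queue, then fetch the HRT while in HOLD.
 */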
2707static int adpt_i2o_activate_hba(adpt_hba* pHba)
2708{
2709 int rcode;
2710
2711 if(pHba->initialized ) {
2712 if (adpt_i2o_status_get(pHba) < 0) {
2713 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2714 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2715 return rcode;
2716 }
2717 if (adpt_i2o_status_get(pHba) < 0) {
2718 printk(KERN_INFO "HBA not responding.\n");
2719 return -1;
2720 }
2721 }
2722
2723 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2724 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2725 return -1;
2726 }
2727
2728 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2729 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2730 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2731 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2732 adpt_i2o_reset_hba(pHba);
2733 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2734 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2735 return -1;
2736 }
2737 }
2738 } else {
2739 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2740 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2741 return rcode;
2742 }
2743
2744 }
2745
2746 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2747 return -1;
2748 }
2749
2750 /* In HOLD state */
2751
2752 if (adpt_i2o_hrt_get(pHba) < 0) {
2753 return -1;
2754 }
2755
2756 return 0;
2757}
2758
2759/*
2760 * Bring a controller online into OPERATIONAL state.
2761 */
2762
2763static int adpt_i2o_online_hba(adpt_hba* pHba)
2764{
2765 if (adpt_i2o_systab_send(pHba) < 0) {
2766 adpt_i2o_delete_hba(pHba);
2767 return -1;
2768 }
2769 /* In READY state */
2770
2771 if (adpt_i2o_enable_hba(pHba) < 0) {
2772 adpt_i2o_delete_hba(pHba);
2773 return -1;
2774 }
2775
2776 /* In OPERATIONAL state */
2777 return 0;
2778}
2779
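/*
 * Post a UTIL_NOP through message frame 'm' (claiming one first if the
 * caller passed EMPTY_QUEUE) so an unused inbound frame is handed back
 * to the IOP.
 */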
2780static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2781{
2782 u32 __iomem *msg;
2783 ulong timeout = jiffies + 5*HZ;
2784
2785 while(m == EMPTY_QUEUE){
2786 rmb();
2787 m = readl(pHba->post_port);
2788 if(m != EMPTY_QUEUE){
2789 break;
2790 }
2791 if(time_after(jiffies,timeout)){
2792 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2793 return 2;
2794 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002795 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 }
2797 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2798 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2799 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2800 writel( 0,&msg[2]);
2801 wmb();
2802
2803 writel(m, pHba->post_port);
2804 wmb();
2805 return 0;
2806}
2807
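/*
 * Outbound queue handshake: post an EXEC_OUTBOUND_INIT describing the host
 * page size and reply frame size along with a 4-byte DMA status word, poll
 * that word until the IOP reports completion, then prime the outbound FIFO
 * with the bus address of every frame in the reply pool.
 */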
2808static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2809{
2810 u8 *status;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002811 dma_addr_t addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 u32 __iomem *msg = NULL;
2813 int i;
2814 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 u32 m;
2816
2817 do {
2818 rmb();
2819 m = readl(pHba->post_port);
2820 if (m != EMPTY_QUEUE) {
2821 break;
2822 }
2823
2824 if(time_after(jiffies,timeout)){
2825 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2826 return -ETIMEDOUT;
2827 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002828 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 } while(m == EMPTY_QUEUE);
2830
2831 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2832
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002833 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002834 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 adpt_send_nop(pHba, m);
2836 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2837 pHba->name);
2838 return -ENOMEM;
2839 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002840 memset(status, 0, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841
2842 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2843 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2844 writel(0, &msg[2]);
2845 writel(0x0106, &msg[3]); /* Transaction context */
2846 writel(4096, &msg[4]); /* Host page frame size */
2847 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2848 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002849 writel((u32)addr, &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850
2851 writel(m, pHba->post_port);
2852 wmb();
2853
2854 // Wait for the reply status to come back
2855 do {
2856 if (*status) {
2857 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2858 break;
2859 }
2860 }
2861 rmb();
2862 if(time_after(jiffies,timeout)){
2863 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002864 /* We lose 4 bytes of "status" here, but we
 2865			   cannot free these because the controller may
 2866			   wake up and corrupt those bytes at any time */
2867 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 return -ETIMEDOUT;
2869 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002870 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 } while (1);
2872
2873 // If the command was successful, fill the fifo with our reply
2874 // message packets
2875 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002876 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 return -2;
2878 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002879 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002881 if(pHba->reply_pool != NULL) {
2882 dma_free_coherent(&pHba->pDev->dev,
2883 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2884 pHba->reply_pool, pHba->reply_pool_pa);
2885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002887 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2888 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2889 &pHba->reply_pool_pa, GFP_KERNEL);
Mariusz Kozlowskibbfbbbc2007-08-11 10:13:24 +02002890 if (!pHba->reply_pool) {
2891 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2892 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 }
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002894 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 for(i = 0; i < pHba->reply_fifo_size; i++) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002897 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2898 pHba->reply_port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 }
2901 adpt_i2o_status_get(pHba);
2902 return 0;
2903}
2904
2905
2906/*
2907 * I2O System Table. Contains information about
2908 * all the IOPs in the system. Used to inform IOPs
2909 * about each other's existence.
2910 *
2911 * sys_tbl_ver is the CurrentChangeIndicator that is
2912 * used by IOPs to track changes.
2913 */
2914
2915
2916
2917static s32 adpt_i2o_status_get(adpt_hba* pHba)
2918{
2919 ulong timeout;
2920 u32 m;
2921 u32 __iomem *msg;
2922 u8 *status_block=NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923
2924 if(pHba->status_block == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002925 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2926 sizeof(i2o_status_block),
2927 &pHba->status_block_pa, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 if(pHba->status_block == NULL) {
2929 printk(KERN_ERR
2930 "dpti%d: Get Status Block failed; Out of memory. \n",
2931 pHba->unit);
2932 return -ENOMEM;
2933 }
2934 }
2935 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2936 status_block = (u8*)(pHba->status_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2938 do {
2939 rmb();
2940 m = readl(pHba->post_port);
2941 if (m != EMPTY_QUEUE) {
2942 break;
2943 }
2944 if(time_after(jiffies,timeout)){
2945 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2946 pHba->name);
2947 return -ETIMEDOUT;
2948 }
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002949 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 } while(m==EMPTY_QUEUE);
2951
2952
2953 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2954
2955 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2956 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2957 writel(1, &msg[2]);
2958 writel(0, &msg[3]);
2959 writel(0, &msg[4]);
2960 writel(0, &msg[5]);
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02002961 writel( dma_low(pHba->status_block_pa), &msg[6]);
2962 writel( dma_high(pHba->status_block_pa), &msg[7]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2964
2965 //post message
2966 writel(m, pHba->post_port);
2967 wmb();
2968
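	/*
	 * Rather than waiting for a reply frame, poll the last byte of the
	 * 88-byte status block, which the IOP DMAs into host memory, as the
	 * completion marker.
	 */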
2969 while(status_block[87]!=0xff){
2970 if(time_after(jiffies,timeout)){
2971 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2972 pHba->unit);
2973 return -ETIMEDOUT;
2974 }
2975 rmb();
Nishanth Aravamudana9a30472005-11-07 01:01:20 -08002976 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 }
2978
2979 // Set up our number of outbound and inbound messages
2980 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2981 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2982 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2983 }
2984
2985 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2986 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2987 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2988 }
2989
2990 // Calculate the Scatter Gather list size
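	/*
	 * Each inbound frame must hold the message header (14 u32s with the
	 * 64-bit SG prefix, 12 otherwise) plus one SG element per segment;
	 * 64-bit elements carry an extra address word, hence the different
	 * divisors.
	 */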
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02002991 if (dpt_dma64(pHba)) {
2992 pHba->sg_tablesize
2993 = ((pHba->status_block->inbound_frame_size * 4
2994 - 14 * sizeof(u32))
2995 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2996 } else {
2997 pHba->sg_tablesize
2998 = ((pHba->status_block->inbound_frame_size * 4
2999 - 12 * sizeof(u32))
3000 / sizeof(struct sg_simple_element));
3001 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3003 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3004 }
3005
3006
3007#ifdef DEBUG
3008 printk("dpti%d: State = ",pHba->unit);
3009 switch(pHba->status_block->iop_state) {
3010 case 0x01:
3011 printk("INIT\n");
3012 break;
3013 case 0x02:
3014 printk("RESET\n");
3015 break;
3016 case 0x04:
3017 printk("HOLD\n");
3018 break;
3019 case 0x05:
3020 printk("READY\n");
3021 break;
3022 case 0x08:
3023 printk("OPERATIONAL\n");
3024 break;
3025 case 0x10:
3026 printk("FAILED\n");
3027 break;
3028 case 0x11:
3029 printk("FAULTED\n");
3030 break;
3031 default:
3032 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3033 }
3034#endif
3035 return 0;
3036}
3037
3038/*
3039 * Get the IOP's Logical Configuration Table
3040 */
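/*
 * The LCT is pulled with LCT_NOTIFY ("report now") into a coherent buffer;
 * if the IOP reports a table bigger than the buffer, grow the buffer to the
 * reported size and repeat.
 */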
3041static int adpt_i2o_lct_get(adpt_hba* pHba)
3042{
3043 u32 msg[8];
3044 int ret;
3045 u32 buf[16];
3046
3047 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3048 pHba->lct_size = pHba->status_block->expected_lct_size;
3049 }
3050 do {
3051 if (pHba->lct == NULL) {
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003052 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3053 pHba->lct_size, &pHba->lct_pa,
Julia Lawallda2907f2010-05-30 15:49:22 +02003054 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 if(pHba->lct == NULL) {
3056 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3057 pHba->name);
3058 return -ENOMEM;
3059 }
3060 }
3061 memset(pHba->lct, 0, pHba->lct_size);
3062
3063 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3064 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3065 msg[2] = 0;
3066 msg[3] = 0;
3067 msg[4] = 0xFFFFFFFF; /* All devices */
3068 msg[5] = 0x00000000; /* Report now */
3069 msg[6] = 0xD0000000|pHba->lct_size;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003070 msg[7] = (u32)pHba->lct_pa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071
3072 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
 3073			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3074 pHba->name, ret);
3075 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3076 return ret;
3077 }
3078
3079 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3080 pHba->lct_size = pHba->lct->table_size << 2;
Miquel van Smoorenburg67af2b02008-05-02 01:06:39 +02003081 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3082 pHba->lct, pHba->lct_pa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 pHba->lct = NULL;
3084 }
3085 } while (pHba->lct == NULL);
3086
 3087	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3088
3089
3090 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3091 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3092 pHba->FwDebugBufferSize = buf[1];
Miquel van Smoorenburg62ac5ae2008-05-02 01:07:27 +02003093 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3094 pHba->FwDebugBufferSize);
3095 if (pHba->FwDebugBuffer_P) {
3096 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3097 FW_DEBUG_FLAGS_OFFSET;
3098 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3099 FW_DEBUG_BLED_OFFSET;
3100 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3101 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3102 FW_DEBUG_STR_LENGTH_OFFSET;
3103 pHba->FwDebugBuffer_P += buf[2];
3104 pHba->FwDebugFlags = 0;
3105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106 }
3107
3108 return 0;
3109}
3110
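/*
 * Build the I2O system table shared with the IOPs: one entry per
 * registered HBA carrying its IOP id, state, inbound frame size and the
 * 64-bit bus address of its inbound post FIFO (base + 0x40).
 */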
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get an updated status block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue;	// try the next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32 *)sys_tbl;
	printk(KERN_DEBUG "sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
	for (count = 0; count < (sys_tbl_len >> 2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 * Dump the information block associated with a given unit (TID)
 */

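/*
 * The strings printed here come from the device's standard I2O
 * DeviceIdentity parameter group (0xF100); fields 3, 4 and 6 hold the
 * vendor, product and revision strings respectively.
 */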
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0) {
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if (d->lct_data.device_flags & (1 << 0))
		printk("C");	// ConfigDialog requested
	if (d->lct_data.device_flags & (1 << 1))
		printk("U");	// Multi-user capable
	if (!(d->lct_data.device_flags & (1 << 4)))
		printk("P");	// Peer service enabled
	if (!(d->lct_data.device_flags & (1 << 5)))
		printk("M");	// Mgmt service enabled
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static const char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch (class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

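/*
 * Read the IOP's Hardware Resource Table.  As with the LCT, the
 * buffer is grown and the request retried if the IOP reports a larger
 * table than the one we allocated.
 */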
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);	/* Simple transaction */
		msg[5] = (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
					pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while (pHba->hrt == NULL);
	return 0;
}

/*
 * Query one scalar group value or a whole scalar group.
 */
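/*
 * The operation block is laid out as { OperationCount = 1, Reserved,
 * Operation = FIELD_GET, GroupNumber, FieldCount = 1, FieldIndex };
 * passing field == -1 turns the request into a whole-group read.
 * Both the operation and result blocks must be DMA-able, so they are
 * bounced through coherent buffers here.
 */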
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8) * (8 + buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va + 8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8 + buflen),
		resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}


/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

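	/*
	 * Nine-word UtilParams frame: msg[5]/msg[6] form the SGL element
	 * describing the operation block, and msg[7]/msg[8] the one for
	 * the result block that the IOP writes back; in each case the
	 * low bits carry the byte count and the high byte the I2O
	 * simple-SGE flags.
	 */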
	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1] & 0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if ((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	    (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
			pHba->unit, -ret);
	} else {
		printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if (!pHba->status_block) {
		return -ENOMEM;
	}
	/* Enable only allowed in READY state */
	if (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if (pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING "%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

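/*
 * Push the system table built by adpt_i2o_build_sys_table() to this
 * IOP with ExecSysTabSet.  The private memory and private I/O space
 * SGL elements are supplied with zero length, since this driver does
 * not declare either.
 */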
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12);	/* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;					/* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

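/*
 * Module init: adpt_detect() probes the PCI bus and links every
 * controller it finds into hba_chain; each host is then registered
 * with the SCSI midlayer and scanned for devices.  If any
 * scsi_add_host() call fails, the hosts in the chain are removed
 * again before the error is returned.
 */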
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");