blob: b2e60bd4a0c69130f781710a1fa51ac6f7656710 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
Brian King35a39692006-09-25 12:39:20 -050073#include <linux/libata.h>
Brian King0ce3a7e2008-07-11 13:37:50 -050074#include <linux/hdreg.h>
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -080075#include <linux/stringify.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070076#include <asm/io.h>
77#include <asm/irq.h>
78#include <asm/processor.h>
79#include <scsi/scsi.h>
80#include <scsi/scsi_host.h>
81#include <scsi/scsi_tcq.h>
82#include <scsi/scsi_eh.h>
83#include <scsi/scsi_cmnd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#include "ipr.h"
85
86/*
87 * Global Data
88 */
/* List of all registered adapter configs; guarded by ipr_driver_lock. */
Denis Chengb7d68ca2007-12-13 16:14:27 -080089static LIST_HEAD(ipr_ioa_head);
/* Backing storage for the module parameters declared via
 * module_param_named() further below; see the MODULE_PARM_DESC()
 * strings there for the meaning and defaults of each knob. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070090static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
91static unsigned int ipr_max_speed = 1;
92static int ipr_testmode = 0;
93static unsigned int ipr_fastfail = 0;
Brian King5469cb52007-03-29 12:42:40 -050094static unsigned int ipr_transop_timeout = 0;
brking@us.ibm.com62275042005-11-01 17:01:14 -060095static unsigned int ipr_enable_cache = 1;
brking@us.ibm.comd3c74872005-11-01 17:01:34 -060096static unsigned int ipr_debug = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -080097static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
Brian Kingac09c342007-04-26 16:00:16 -050098static unsigned int ipr_dual_ioa_raid = 1;
/* Serializes driver-global state such as ipr_ioa_head. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070099static DEFINE_SPINLOCK(ipr_driver_lock);
100
101/* This table describes the differences between DMA controller chips */
/* Register offsets below are hardware-defined per chip family; do not
 * reorder or "normalize" them. The anonymous inner struct groups the
 * interrupt/ioarrin register offsets (see struct ipr_chip_cfg_t in ipr.h). */
102static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
Brian King60e74862006-11-21 10:28:10 -0600103 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 .mailbox = 0x0042C,
105 .cache_line_size = 0x20,
106 {
107 .set_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_mask_reg = 0x00230,
109 .sense_interrupt_mask_reg = 0x0022C,
110 .clr_interrupt_reg = 0x00228,
111 .sense_interrupt_reg = 0x00224,
112 .ioarrin_reg = 0x00404,
113 .sense_uproc_interrupt_reg = 0x00214,
114 .set_uproc_interrupt_reg = 0x00214,
115 .clr_uproc_interrupt_reg = 0x00218
116 }
117 },
118 { /* Snipe and Scamp */
119 .mailbox = 0x0052C,
120 .cache_line_size = 0x20,
121 {
122 .set_interrupt_mask_reg = 0x00288,
123 .clr_interrupt_mask_reg = 0x0028C,
124 .sense_interrupt_mask_reg = 0x00288,
125 .clr_interrupt_reg = 0x00284,
126 .sense_interrupt_reg = 0x00280,
127 .ioarrin_reg = 0x00504,
128 .sense_uproc_interrupt_reg = 0x00290,
129 .set_uproc_interrupt_reg = 0x00290,
130 .clr_uproc_interrupt_reg = 0x00294
131 }
132 },
Wayne Boyera74c1632010-02-19 13:23:51 -0800133 { /* CRoC */
134 .mailbox = 0x00040,
135 .cache_line_size = 0x20,
136 {
137 .set_interrupt_mask_reg = 0x00010,
138 .clr_interrupt_mask_reg = 0x00018,
139 .sense_interrupt_mask_reg = 0x00010,
140 .clr_interrupt_reg = 0x00008,
141 .sense_interrupt_reg = 0x00000,
142 .ioarrin_reg = 0x00070,
143 .sense_uproc_interrupt_reg = 0x00020,
144 .set_uproc_interrupt_reg = 0x00020,
145 .clr_uproc_interrupt_reg = 0x00028
146 }
147 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148};
149
/* Maps PCI vendor/device IDs to interrupt mode (LSI vs MSI), SIS
 * interface level, and the register layout from ipr_chip_cfg[] above. */
150static const struct ipr_chip_t ipr_chip[] = {
Wayne Boyera32c0552010-02-19 13:23:36 -0800151 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
152 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
153 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
154 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
155 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
156 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
157 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158};
159
/* Indexed by the max_speed module parameter (0-2); see MODULE_PARM_DESC. */
160static int ipr_max_bus_speeds [] = {
161 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
162};
163
/* Module metadata and parameter declarations. Parameters with mode 0 are
 * load-time only; fastfail and debug are writable at runtime via sysfs
 * (S_IRUGO | S_IWUSR). */
164MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
165MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
166module_param_named(max_speed, ipr_max_speed, uint, 0);
167MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
168module_param_named(log_level, ipr_log_level, uint, 0);
169MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
170module_param_named(testmode, ipr_testmode, int, 0);
171MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
Wayne Boyer2cf22be2009-02-24 11:36:00 -0800172module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
174module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
175MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
brking@us.ibm.com62275042005-11-01 17:01:14 -0600176module_param_named(enable_cache, ipr_enable_cache, int, 0);
177MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
Wayne Boyer2cf22be2009-02-24 11:36:00 -0800178module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
brking@us.ibm.comd3c74872005-11-01 17:01:34 -0600179MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
Brian Kingac09c342007-04-26 16:00:16 -0500180module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
181MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800182module_param_named(max_devs, ipr_max_devs, int, 0);
183MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
184 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185MODULE_LICENSE("GPL");
186MODULE_VERSION(IPR_DRIVER_VERSION);
187
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188/* A constant array of IOASCs/URCs/Error Messages */
189static const
190struct ipr_error_table_t ipr_error_table[] = {
/* Each entry: {IOASC code, <flag>, log level, message}. The message
 * prefix (e.g. "8155:", "FFF6:") is the URC shown to the operator.
 * NOTE(review): the second field appears to be a log-IOASA/fast-fail
 * indicator — confirm its exact meaning against struct
 * ipr_error_table_t in ipr.h before relying on it. */
Brian King933916f2007-03-29 12:43:30 -0500191 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 "8155: An unknown error was received"},
193 {0x00330000, 0, 0,
194 "Soft underlength error"},
195 {0x005A0000, 0, 0,
196 "Command to be cancelled not found"},
197 {0x00808000, 0, 0,
198 "Qualified success"},
Brian King933916f2007-03-29 12:43:30 -0500199 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 "FFFE: Soft device bus error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500201 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500202 "4101: Soft device bus fabric error"},
Brian King933916f2007-03-29 12:43:30 -0500203 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 "FFF9: Device sector reassign successful"},
Brian King933916f2007-03-29 12:43:30 -0500205 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 "FFF7: Media error recovered by device rewrite procedures"},
Brian King933916f2007-03-29 12:43:30 -0500207 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 "7001: IOA sector reassignment successful"},
Brian King933916f2007-03-29 12:43:30 -0500209 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210 "FFF9: Soft media error. Sector reassignment recommended"},
Brian King933916f2007-03-29 12:43:30 -0500211 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 "FFF7: Media error recovered by IOA rewrite procedures"},
Brian King933916f2007-03-29 12:43:30 -0500213 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 "FF3D: Soft PCI bus error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500215 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 "FFF6: Device hardware error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500217 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 "FFF6: Device hardware error recovered by the device"},
Brian King933916f2007-03-29 12:43:30 -0500219 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 "FF3D: Soft IOA error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500221 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222 "FFFA: Undefined device response recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500223 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 "FFF6: Device bus error, message or command phase"},
Brian King933916f2007-03-29 12:43:30 -0500225 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King35a39692006-09-25 12:39:20 -0500226 "FFFE: Task Management Function failed"},
Brian King933916f2007-03-29 12:43:30 -0500227 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 "FFF6: Failure prediction threshold exceeded"},
Brian King933916f2007-03-29 12:43:30 -0500229 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 "8009: Impending cache battery pack failure"},
231 {0x02040400, 0, 0,
232 "34FF: Disk device format in progress"},
Brian King65f56472007-04-26 16:00:12 -0500233 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
234 "9070: IOA requested reset"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 {0x023F0000, 0, 0,
236 "Synchronization required"},
237 {0x024E0000, 0, 0,
238 "No ready, IOA shutdown"},
239 {0x025A0000, 0, 0,
240 "Not ready, IOA has been shutdown"},
Brian King933916f2007-03-29 12:43:30 -0500241 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 "3020: Storage subsystem configuration error"},
243 {0x03110B00, 0, 0,
244 "FFF5: Medium error, data unreadable, recommend reassign"},
245 {0x03110C00, 0, 0,
246 "7000: Medium error, data unreadable, do not reassign"},
Brian King933916f2007-03-29 12:43:30 -0500247 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248 "FFF3: Disk media format bad"},
Brian King933916f2007-03-29 12:43:30 -0500249 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250 "3002: Addressed device failed to respond to selection"},
Brian King933916f2007-03-29 12:43:30 -0500251 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 "3100: Device bus error"},
Brian King933916f2007-03-29 12:43:30 -0500253 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254 "3109: IOA timed out a device command"},
255 {0x04088000, 0, 0,
256 "3120: SCSI bus is not operational"},
Brian King933916f2007-03-29 12:43:30 -0500257 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500258 "4100: Hard device bus fabric error"},
Brian King933916f2007-03-29 12:43:30 -0500259 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 "9000: IOA reserved area data check"},
Brian King933916f2007-03-29 12:43:30 -0500261 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262 "9001: IOA reserved area invalid data pattern"},
Brian King933916f2007-03-29 12:43:30 -0500263 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264 "9002: IOA reserved area LRC error"},
Brian King933916f2007-03-29 12:43:30 -0500265 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266 "102E: Out of alternate sectors for disk storage"},
Brian King933916f2007-03-29 12:43:30 -0500267 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 "FFF4: Data transfer underlength error"},
Brian King933916f2007-03-29 12:43:30 -0500269 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 "FFF4: Data transfer overlength error"},
Brian King933916f2007-03-29 12:43:30 -0500271 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 "3400: Logical unit failure"},
Brian King933916f2007-03-29 12:43:30 -0500273 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 "FFF4: Device microcode is corrupt"},
Brian King933916f2007-03-29 12:43:30 -0500275 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 "8150: PCI bus error"},
277 {0x04430000, 1, 0,
278 "Unsupported device bus message received"},
Brian King933916f2007-03-29 12:43:30 -0500279 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 "FFF4: Disk device problem"},
Brian King933916f2007-03-29 12:43:30 -0500281 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 "8150: Permanent IOA failure"},
Brian King933916f2007-03-29 12:43:30 -0500283 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 "3010: Disk device returned wrong response to IOA"},
Brian King933916f2007-03-29 12:43:30 -0500285 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 "8151: IOA microcode error"},
287 {0x04448500, 0, 0,
288 "Device bus status error"},
Brian King933916f2007-03-29 12:43:30 -0500289 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 "8157: IOA error requiring IOA reset to recover"},
Brian King35a39692006-09-25 12:39:20 -0500291 {0x04448700, 0, 0,
292 "ATA device status error"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 {0x04490000, 0, 0,
294 "Message reject received from the device"},
Brian King933916f2007-03-29 12:43:30 -0500295 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 "8008: A permanent cache battery pack failure occurred"},
Brian King933916f2007-03-29 12:43:30 -0500297 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 "9090: Disk unit has been modified after the last known status"},
Brian King933916f2007-03-29 12:43:30 -0500299 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 "9081: IOA detected device error"},
Brian King933916f2007-03-29 12:43:30 -0500301 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302 "9082: IOA detected device error"},
Brian King933916f2007-03-29 12:43:30 -0500303 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304 "3110: Device bus error, message or command phase"},
Brian King933916f2007-03-29 12:43:30 -0500305 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
Brian King35a39692006-09-25 12:39:20 -0500306 "3110: SAS Command / Task Management Function failed"},
Brian King933916f2007-03-29 12:43:30 -0500307 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 "9091: Incorrect hardware configuration change has been detected"},
Brian King933916f2007-03-29 12:43:30 -0500309 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600310 "9073: Invalid multi-adapter configuration"},
Brian King933916f2007-03-29 12:43:30 -0500311 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500312 "4010: Incorrect connection between cascaded expanders"},
Brian King933916f2007-03-29 12:43:30 -0500313 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500314 "4020: Connections exceed IOA design limits"},
Brian King933916f2007-03-29 12:43:30 -0500315 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500316 "4030: Incorrect multipath connection"},
Brian King933916f2007-03-29 12:43:30 -0500317 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500318 "4110: Unsupported enclosure function"},
Brian King933916f2007-03-29 12:43:30 -0500319 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 "FFF4: Command to logical unit failed"},
321 {0x05240000, 1, 0,
322 "Illegal request, invalid request type or request packet"},
323 {0x05250000, 0, 0,
324 "Illegal request, invalid resource handle"},
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600325 {0x05258000, 0, 0,
326 "Illegal request, commands not allowed to this device"},
327 {0x05258100, 0, 0,
328 "Illegal request, command not allowed to a secondary adapter"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329 {0x05260000, 0, 0,
330 "Illegal request, invalid field in parameter list"},
331 {0x05260100, 0, 0,
332 "Illegal request, parameter not supported"},
333 {0x05260200, 0, 0,
334 "Illegal request, parameter value invalid"},
335 {0x052C0000, 0, 0,
336 "Illegal request, command sequence error"},
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600337 {0x052C8000, 1, 0,
338 "Illegal request, dual adapter support not enabled"},
Brian King933916f2007-03-29 12:43:30 -0500339 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 "9031: Array protection temporarily suspended, protection resuming"},
Brian King933916f2007-03-29 12:43:30 -0500341 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 "9040: Array protection temporarily suspended, protection resuming"},
Brian King933916f2007-03-29 12:43:30 -0500343 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500344 "3140: Device bus not ready to ready transition"},
Brian King933916f2007-03-29 12:43:30 -0500345 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 "FFFB: SCSI bus was reset"},
347 {0x06290500, 0, 0,
348 "FFFE: SCSI bus transition to single ended"},
349 {0x06290600, 0, 0,
350 "FFFE: SCSI bus transition to LVD"},
Brian King933916f2007-03-29 12:43:30 -0500351 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 "FFFB: SCSI bus was reset by another initiator"},
Brian King933916f2007-03-29 12:43:30 -0500353 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 "3029: A device replacement has occurred"},
Brian King933916f2007-03-29 12:43:30 -0500355 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 "9051: IOA cache data exists for a missing or failed device"},
Brian King933916f2007-03-29 12:43:30 -0500357 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600358 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
Brian King933916f2007-03-29 12:43:30 -0500359 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 "9025: Disk unit is not supported at its physical location"},
Brian King933916f2007-03-29 12:43:30 -0500361 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362 "3020: IOA detected a SCSI bus configuration error"},
Brian King933916f2007-03-29 12:43:30 -0500363 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364 "3150: SCSI bus configuration error"},
Brian King933916f2007-03-29 12:43:30 -0500365 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600366 "9074: Asymmetric advanced function disk configuration"},
Brian King933916f2007-03-29 12:43:30 -0500367 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500368 "4040: Incomplete multipath connection between IOA and enclosure"},
Brian King933916f2007-03-29 12:43:30 -0500369 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500370 "4041: Incomplete multipath connection between enclosure and device"},
Brian King933916f2007-03-29 12:43:30 -0500371 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500372 "9075: Incomplete multipath connection between IOA and remote IOA"},
Brian King933916f2007-03-29 12:43:30 -0500373 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500374 "9076: Configuration error, missing remote IOA"},
Brian King933916f2007-03-29 12:43:30 -0500375 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500376 "4050: Enclosure does not support a required multipath function"},
Wayne Boyerb75424f2009-01-28 08:24:50 -0800377 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
378 "4070: Logically bad block written on device"},
Brian King933916f2007-03-29 12:43:30 -0500379 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 "9041: Array protection temporarily suspended"},
Brian King933916f2007-03-29 12:43:30 -0500381 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382 "9042: Corrupt array parity detected on specified device"},
Brian King933916f2007-03-29 12:43:30 -0500383 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384 "9030: Array no longer protected due to missing or failed disk unit"},
Brian King933916f2007-03-29 12:43:30 -0500385 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600386 "9071: Link operational transition"},
Brian King933916f2007-03-29 12:43:30 -0500387 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600388 "9072: Link not operational transition"},
Brian King933916f2007-03-29 12:43:30 -0500389 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 "9032: Array exposed but still protected"},
Brian Kinge4353402007-03-29 12:43:37 -0500391 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
392 "70DD: Device forced failed by disrupt device command"},
Brian King933916f2007-03-29 12:43:30 -0500393 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500394 "4061: Multipath redundancy level got better"},
Brian King933916f2007-03-29 12:43:30 -0500395 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500396 "4060: Multipath redundancy level got worse"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 {0x07270000, 0, 0,
398 "Failure due to other device"},
Brian King933916f2007-03-29 12:43:30 -0500399 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 "9008: IOA does not support functions expected by devices"},
Brian King933916f2007-03-29 12:43:30 -0500401 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 "9010: Cache data associated with attached devices cannot be found"},
Brian King933916f2007-03-29 12:43:30 -0500403 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 "9011: Cache data belongs to devices other than those attached"},
Brian King933916f2007-03-29 12:43:30 -0500405 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 "9020: Array missing 2 or more devices with only 1 device present"},
Brian King933916f2007-03-29 12:43:30 -0500407 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 "9021: Array missing 2 or more devices with 2 or more devices present"},
Brian King933916f2007-03-29 12:43:30 -0500409 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 "9022: Exposed array is missing a required device"},
Brian King933916f2007-03-29 12:43:30 -0500411 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 "9023: Array member(s) not at required physical locations"},
Brian King933916f2007-03-29 12:43:30 -0500413 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 "9024: Array not functional due to present hardware configuration"},
Brian King933916f2007-03-29 12:43:30 -0500415 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 "9026: Array not functional due to present hardware configuration"},
Brian King933916f2007-03-29 12:43:30 -0500417 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 "9027: Array is missing a device and parity is out of sync"},
Brian King933916f2007-03-29 12:43:30 -0500419 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 "9028: Maximum number of arrays already exist"},
Brian King933916f2007-03-29 12:43:30 -0500421 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 "9050: Required cache data cannot be located for a disk unit"},
Brian King933916f2007-03-29 12:43:30 -0500423 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424 "9052: Cache data exists for a device that has been modified"},
Brian King933916f2007-03-29 12:43:30 -0500425 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 "9054: IOA resources not available due to previous problems"},
Brian King933916f2007-03-29 12:43:30 -0500427 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 "9092: Disk unit requires initialization before use"},
Brian King933916f2007-03-29 12:43:30 -0500429 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430 "9029: Incorrect hardware configuration change has been detected"},
Brian King933916f2007-03-29 12:43:30 -0500431 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 "9060: One or more disk pairs are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500433 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 "9061: One or more disks are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500435 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436 "9062: One or more disks are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500437 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 "9063: Maximum number of functional arrays has been exceeded"},
439 {0x0B260000, 0, 0,
440 "Aborted command, invalid descriptor"},
441 {0x0B5A0000, 0, 0,
442 "Command terminated by host"}
443};
444
/* Enclosure (SES) identification table: {product ID, compare mask, max
 * bus speed in MB/s}. An '*' in the mask marks a wildcard character
 * position when matching the INQUIRY product ID; the ID strings are
 * space-padded and must not be re-trimmed. */
445static const struct ipr_ses_table_entry ipr_ses_table[] = {
446 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
447 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
448 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
449 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
450 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
451 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
452 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
453 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
454 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
455 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
456 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
457 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
458 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
459};
460
461/*
462 * Function Prototypes
463 */
464static int ipr_reset_alert(struct ipr_cmnd *);
465static void ipr_process_ccn(struct ipr_cmnd *);
466static void ipr_process_error(struct ipr_cmnd *);
467static void ipr_reset_ioa_job(struct ipr_cmnd *);
468static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
469 enum ipr_shutdown_type);
470
471#ifdef CONFIG_SCSI_IPR_TRACE
472/**
473 * ipr_trc_hook - Add a trace entry to the driver trace
474 * @ipr_cmd: ipr command struct
475 * @type: trace type
476 * @add_data: additional data
477 *
478 * Return value:
479 * none
480 **/
481static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
482 u8 type, u32 add_data)
483{
484 struct ipr_trace_entry *trace_entry;
485 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
486
/* NOTE(review): trace_index is post-incremented with no explicit bound
 * check; presumably its type width wraps it within the trace[] array —
 * confirm against the field declaration in ipr.h. */
487 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
488 trace_entry->time = jiffies;
489 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
490 trace_entry->type = type;
/* The ATA command byte lives in different structures for 64-bit SIS
 * (ata_ioadl regs) vs. 32-bit SIS (inside the IOARCB add_data union). */
Wayne Boyera32c0552010-02-19 13:23:36 -0800491 if (ipr_cmd->ioa_cfg->sis64)
492 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
493 else
494 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
Brian King35a39692006-09-25 12:39:20 -0500495 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
497 trace_entry->u.add_data = add_data;
498}
499#else
/* Tracing compiled out: evaluate to a no-op statement. */
500#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
501#endif
502
503/**
504 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
505 * @ipr_cmd: ipr command struct
506 *
507 * Return value:
508 * none
509 **/
510static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
511{
512 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
513 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -0800514 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515
/* Clear the command packet and zero the transfer-length / IOADL length
 * fields of the request control block before the next use. */
516 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -0800517 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -0800519 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 ioarcb->read_ioadl_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -0800521
/* Point the IOADL address at the embedded descriptor list; the field,
 * width (be64 vs be32) and list flavor differ between SIS64 and SIS32. */
522 if (ipr_cmd->ioa_cfg->sis64)
523 ioarcb->u.sis64_addr_data.data_ioadl_addr =
524 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
525 else {
526 ioarcb->write_ioadl_addr =
527 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
528 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
529 }
530
/* Reset the status area and per-use bookkeeping. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 ioasa->ioasc = 0;
532 ioasa->residual_data_len = 0;
Brian King35a39692006-09-25 12:39:20 -0500533 ioasa->u.gata.status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534
535 ipr_cmd->scsi_cmd = NULL;
Brian King35a39692006-09-25 12:39:20 -0500536 ipr_cmd->qc = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 ipr_cmd->sense_buffer[0] = 0;
538 ipr_cmd->dma_use_sg = 0;
539}
540
541/**
542 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
543 * @ipr_cmd: ipr command struct
544 *
545 * Return value:
546 * none
547 **/
548static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
549{
550 ipr_reinit_ipr_cmnd(ipr_cmd);
551 ipr_cmd->u.scratch = 0;
552 ipr_cmd->sibling = NULL;
553 init_timer(&ipr_cmd->timer);
554}
555
556/**
557 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
558 * @ioa_cfg: ioa config struct
559 *
560 * Return value:
561 * pointer to ipr command struct
562 **/
563static
564struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
565{
566 struct ipr_cmnd *ipr_cmd;
567
568 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
569 list_del(&ipr_cmd->queue);
570 ipr_init_ipr_cmnd(ipr_cmd);
571
572 return ipr_cmd;
573}
574
575/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
577 * @ioa_cfg: ioa config struct
578 * @clr_ints: interrupts to clear
579 *
580 * This function masks all interrupts on the adapter, then clears the
581 * interrupts specified in the mask
582 *
583 * Return value:
584 * none
585 **/
586static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
587 u32 clr_ints)
588{
589 volatile u32 int_reg;
590
591 /* Stop new interrupts */
592 ioa_cfg->allow_interrupts = 0;
593
594 /* Set interrupt mask to stop all new interrupts */
595 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
596
597 /* Clear any pending interrupts */
598 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
599 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
600}
601
602/**
603 * ipr_save_pcix_cmd_reg - Save PCI-X command register
604 * @ioa_cfg: ioa config struct
605 *
606 * Return value:
607 * 0 on success / -EIO on failure
608 **/
609static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
610{
611 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
612
Brian King7dce0e12007-01-23 11:25:30 -0600613 if (pcix_cmd_reg == 0)
614 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615
616 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
617 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
618 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
619 return -EIO;
620 }
621
622 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
623 return 0;
624}
625
626/**
627 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
628 * @ioa_cfg: ioa config struct
629 *
630 * Return value:
631 * 0 on success / -EIO on failure
632 **/
633static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
634{
635 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
636
637 if (pcix_cmd_reg) {
638 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
639 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
640 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
641 return -EIO;
642 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 }
644
645 return 0;
646}
647
648/**
Brian King35a39692006-09-25 12:39:20 -0500649 * ipr_sata_eh_done - done function for aborted SATA commands
650 * @ipr_cmd: ipr command struct
651 *
652 * This function is invoked for ops generated to SATA
653 * devices which are being aborted.
654 *
655 * Return value:
656 * none
657 **/
658static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
659{
660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
661 struct ata_queued_cmd *qc = ipr_cmd->qc;
662 struct ipr_sata_port *sata_port = qc->ap->private_data;
663
664 qc->err_mask |= AC_ERR_OTHER;
665 sata_port->ioasa.status |= ATA_BUSY;
666 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
667 ata_qc_complete(qc);
668}
669
670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 * ipr_scsi_eh_done - mid-layer done function for aborted ops
672 * @ipr_cmd: ipr command struct
673 *
674 * This function is invoked by the interrupt handler for
675 * ops generated by the SCSI mid-layer which are being aborted.
676 *
677 * Return value:
678 * none
679 **/
680static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
681{
682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
683 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
684
685 scsi_cmd->result |= (DID_ERROR << 16);
686
FUJITA Tomonori63015bc2007-05-26 00:26:59 +0900687 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 scsi_cmd->scsi_done(scsi_cmd);
689 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
690}
691
692/**
693 * ipr_fail_all_ops - Fails all outstanding ops.
694 * @ioa_cfg: ioa config struct
695 *
696 * This function fails all outstanding ops.
697 *
698 * Return value:
699 * none
700 **/
701static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
702{
703 struct ipr_cmnd *ipr_cmd, *temp;
704
705 ENTER;
706 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
707 list_del(&ipr_cmd->queue);
708
709 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
710 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
711
712 if (ipr_cmd->scsi_cmd)
713 ipr_cmd->done = ipr_scsi_eh_done;
Brian King35a39692006-09-25 12:39:20 -0500714 else if (ipr_cmd->qc)
715 ipr_cmd->done = ipr_sata_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
717 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
718 del_timer(&ipr_cmd->timer);
719 ipr_cmd->done(ipr_cmd);
720 }
721
722 LEAVE;
723}
724
725/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800726 * ipr_send_command - Send driver initiated requests.
727 * @ipr_cmd: ipr command struct
728 *
729 * This function sends a command to the adapter using the correct write call.
730 * In the case of sis64, calculate the ioarcb size required. Then or in the
731 * appropriate bits.
732 *
733 * Return value:
734 * none
735 **/
736static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
737{
738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
739 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
740
741 if (ioa_cfg->sis64) {
742 /* The default size is 256 bytes */
743 send_dma_addr |= 0x1;
744
745 /* If the number of ioadls * size of ioadl > 128 bytes,
746 then use a 512 byte ioarcb */
747 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
748 send_dma_addr |= 0x4;
749 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
750 } else
751 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
752}
753
754/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 * ipr_do_req - Send driver initiated requests.
756 * @ipr_cmd: ipr command struct
757 * @done: done function
758 * @timeout_func: timeout function
759 * @timeout: timeout value
760 *
761 * This function sends the specified command to the adapter with the
762 * timeout given. The done function is invoked on command completion.
763 *
764 * Return value:
765 * none
766 **/
767static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
768 void (*done) (struct ipr_cmnd *),
769 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
770{
771 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
772
773 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
774
775 ipr_cmd->done = done;
776
777 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
778 ipr_cmd->timer.expires = jiffies + timeout;
779 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
780
781 add_timer(&ipr_cmd->timer);
782
783 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
784
785 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -0800786
787 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788}
789
790/**
791 * ipr_internal_cmd_done - Op done function for an internally generated op.
792 * @ipr_cmd: ipr command struct
793 *
794 * This function is the op done function for an internally generated,
795 * blocking op. It simply wakes the sleeping thread.
796 *
797 * Return value:
798 * none
799 **/
800static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
801{
802 if (ipr_cmd->sibling)
803 ipr_cmd->sibling = NULL;
804 else
805 complete(&ipr_cmd->completion);
806}
807
808/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800809 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
810 * @ipr_cmd: ipr command struct
811 * @dma_addr: dma address
812 * @len: transfer length
813 * @flags: ioadl flag value
814 *
815 * This function initializes an ioadl in the case where there is only a single
816 * descriptor.
817 *
818 * Return value:
819 * nothing
820 **/
821static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
822 u32 len, int flags)
823{
824 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
825 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
826
827 ipr_cmd->dma_use_sg = 1;
828
829 if (ipr_cmd->ioa_cfg->sis64) {
830 ioadl64->flags = cpu_to_be32(flags);
831 ioadl64->data_len = cpu_to_be32(len);
832 ioadl64->address = cpu_to_be64(dma_addr);
833
834 ipr_cmd->ioarcb.ioadl_len =
835 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
836 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
837 } else {
838 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
839 ioadl->address = cpu_to_be32(dma_addr);
840
841 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
842 ipr_cmd->ioarcb.read_ioadl_len =
843 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
844 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
845 } else {
846 ipr_cmd->ioarcb.ioadl_len =
847 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
848 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
849 }
850 }
851}
852
853/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 * ipr_send_blocking_cmd - Send command and sleep on its completion.
855 * @ipr_cmd: ipr command struct
856 * @timeout_func: function to invoke if command times out
857 * @timeout: timeout
858 *
859 * Return value:
860 * none
861 **/
862static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
863 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
864 u32 timeout)
865{
866 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
867
868 init_completion(&ipr_cmd->completion);
869 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
870
871 spin_unlock_irq(ioa_cfg->host->host_lock);
872 wait_for_completion(&ipr_cmd->completion);
873 spin_lock_irq(ioa_cfg->host->host_lock);
874}
875
876/**
877 * ipr_send_hcam - Send an HCAM to the adapter.
878 * @ioa_cfg: ioa config struct
879 * @type: HCAM type
880 * @hostrcb: hostrcb struct
881 *
882 * This function will send a Host Controlled Async command to the adapter.
883 * If HCAMs are currently not allowed to be issued to the adapter, it will
884 * place the hostrcb on the free queue.
885 *
886 * Return value:
887 * none
888 **/
889static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
890 struct ipr_hostrcb *hostrcb)
891{
892 struct ipr_cmnd *ipr_cmd;
893 struct ipr_ioarcb *ioarcb;
894
895 if (ioa_cfg->allow_cmds) {
896 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
897 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
898 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
899
900 ipr_cmd->u.hostrcb = hostrcb;
901 ioarcb = &ipr_cmd->ioarcb;
902
903 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
904 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
905 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
906 ioarcb->cmd_pkt.cdb[1] = type;
907 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
908 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
909
Wayne Boyera32c0552010-02-19 13:23:36 -0800910 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
911 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912
913 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
914 ipr_cmd->done = ipr_process_ccn;
915 else
916 ipr_cmd->done = ipr_process_error;
917
918 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
919
920 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -0800921
922 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923 } else {
924 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
925 }
926}
927
928/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800929 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800931 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 *
933 * Return value:
934 * none
935 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800936static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937{
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800938 switch(proto) {
939 case IPR_PROTO_SATA:
940 case IPR_PROTO_SAS_STP:
941 res->ata_class = ATA_DEV_ATA;
942 break;
943 case IPR_PROTO_SATA_ATAPI:
944 case IPR_PROTO_SAS_STP_ATAPI:
945 res->ata_class = ATA_DEV_ATAPI;
946 break;
947 default:
948 res->ata_class = ATA_DEV_UNKNOWN;
949 break;
950 };
951}
952
953/**
954 * ipr_init_res_entry - Initialize a resource entry struct.
955 * @res: resource entry struct
956 * @cfgtew: config table entry wrapper struct
957 *
958 * Return value:
959 * none
960 **/
961static void ipr_init_res_entry(struct ipr_resource_entry *res,
962 struct ipr_config_table_entry_wrapper *cfgtew)
963{
964 int found = 0;
965 unsigned int proto;
966 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
967 struct ipr_resource_entry *gscsi_res = NULL;
968
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -0600969 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 res->in_erp = 0;
971 res->add_to_ml = 0;
972 res->del_from_ml = 0;
973 res->resetting_device = 0;
974 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -0500975 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800976
977 if (ioa_cfg->sis64) {
978 proto = cfgtew->u.cfgte64->proto;
979 res->res_flags = cfgtew->u.cfgte64->res_flags;
980 res->qmodel = IPR_QUEUEING_MODEL64(res);
981 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
982
983 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
984 sizeof(res->res_path));
985
986 res->bus = 0;
987 res->lun = scsilun_to_int(&res->dev_lun);
988
989 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
990 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
991 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
992 found = 1;
993 res->target = gscsi_res->target;
994 break;
995 }
996 }
997 if (!found) {
998 res->target = find_first_zero_bit(ioa_cfg->target_ids,
999 ioa_cfg->max_devs_supported);
1000 set_bit(res->target, ioa_cfg->target_ids);
1001 }
1002
1003 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1004 sizeof(res->dev_lun.scsi_lun));
1005 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1006 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1007 res->target = 0;
1008 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1009 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1010 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1011 ioa_cfg->max_devs_supported);
1012 set_bit(res->target, ioa_cfg->array_ids);
1013 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1014 res->bus = IPR_VSET_VIRTUAL_BUS;
1015 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1016 ioa_cfg->max_devs_supported);
1017 set_bit(res->target, ioa_cfg->vset_ids);
1018 } else {
1019 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1020 ioa_cfg->max_devs_supported);
1021 set_bit(res->target, ioa_cfg->target_ids);
1022 }
1023 } else {
1024 proto = cfgtew->u.cfgte->proto;
1025 res->qmodel = IPR_QUEUEING_MODEL(res);
1026 res->flags = cfgtew->u.cfgte->flags;
1027 if (res->flags & IPR_IS_IOA_RESOURCE)
1028 res->type = IPR_RES_TYPE_IOAFP;
1029 else
1030 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1031
1032 res->bus = cfgtew->u.cfgte->res_addr.bus;
1033 res->target = cfgtew->u.cfgte->res_addr.target;
1034 res->lun = cfgtew->u.cfgte->res_addr.lun;
1035 }
1036
1037 ipr_update_ata_class(res, proto);
1038}
1039
1040/**
1041 * ipr_is_same_device - Determine if two devices are the same.
1042 * @res: resource entry struct
1043 * @cfgtew: config table entry wrapper struct
1044 *
1045 * Return value:
1046 * 1 if the devices are the same / 0 otherwise
1047 **/
1048static int ipr_is_same_device(struct ipr_resource_entry *res,
1049 struct ipr_config_table_entry_wrapper *cfgtew)
1050{
1051 if (res->ioa_cfg->sis64) {
1052 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1053 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1054 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1055 sizeof(cfgtew->u.cfgte64->lun))) {
1056 return 1;
1057 }
1058 } else {
1059 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1060 res->target == cfgtew->u.cfgte->res_addr.target &&
1061 res->lun == cfgtew->u.cfgte->res_addr.lun)
1062 return 1;
1063 }
1064
1065 return 0;
1066}
1067
1068/**
1069 * ipr_format_resource_path - Format the resource path for printing.
1070 * @res_path: resource path
1071 * @buf: buffer
1072 *
1073 * Return value:
1074 * pointer to buffer
1075 **/
1076static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1077{
1078 int i;
1079
1080 sprintf(buffer, "%02X", res_path[0]);
1081 for (i=1; res_path[i] != 0xff; i++)
Wayne Boyer4565e372010-02-19 13:24:07 -08001082 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001083
1084 return buffer;
1085}
1086
1087/**
1088 * ipr_update_res_entry - Update the resource entry.
1089 * @res: resource entry struct
1090 * @cfgtew: config table entry wrapper struct
1091 *
1092 * Return value:
1093 * none
1094 **/
1095static void ipr_update_res_entry(struct ipr_resource_entry *res,
1096 struct ipr_config_table_entry_wrapper *cfgtew)
1097{
1098 char buffer[IPR_MAX_RES_PATH_LENGTH];
1099 unsigned int proto;
1100 int new_path = 0;
1101
1102 if (res->ioa_cfg->sis64) {
1103 res->flags = cfgtew->u.cfgte64->flags;
1104 res->res_flags = cfgtew->u.cfgte64->res_flags;
1105 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1106
1107 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1108 sizeof(struct ipr_std_inq_data));
1109
1110 res->qmodel = IPR_QUEUEING_MODEL64(res);
1111 proto = cfgtew->u.cfgte64->proto;
1112 res->res_handle = cfgtew->u.cfgte64->res_handle;
1113 res->dev_id = cfgtew->u.cfgte64->dev_id;
1114
1115 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1116 sizeof(res->dev_lun.scsi_lun));
1117
1118 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1119 sizeof(res->res_path))) {
1120 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1121 sizeof(res->res_path));
1122 new_path = 1;
1123 }
1124
1125 if (res->sdev && new_path)
1126 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1127 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1128 } else {
1129 res->flags = cfgtew->u.cfgte->flags;
1130 if (res->flags & IPR_IS_IOA_RESOURCE)
1131 res->type = IPR_RES_TYPE_IOAFP;
1132 else
1133 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1134
1135 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1136 sizeof(struct ipr_std_inq_data));
1137
1138 res->qmodel = IPR_QUEUEING_MODEL(res);
1139 proto = cfgtew->u.cfgte->proto;
1140 res->res_handle = cfgtew->u.cfgte->res_handle;
1141 }
1142
1143 ipr_update_ata_class(res, proto);
1144}
1145
1146/**
1147 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1148 * for the resource.
1149 * @res: resource entry struct
1150 * @cfgtew: config table entry wrapper struct
1151 *
1152 * Return value:
1153 * none
1154 **/
1155static void ipr_clear_res_target(struct ipr_resource_entry *res)
1156{
1157 struct ipr_resource_entry *gscsi_res = NULL;
1158 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1159
1160 if (!ioa_cfg->sis64)
1161 return;
1162
1163 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1164 clear_bit(res->target, ioa_cfg->array_ids);
1165 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1166 clear_bit(res->target, ioa_cfg->vset_ids);
1167 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1168 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1169 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1170 return;
1171 clear_bit(res->target, ioa_cfg->target_ids);
1172
1173 } else if (res->bus == 0)
1174 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175}
1176
1177/**
1178 * ipr_handle_config_change - Handle a config change from the adapter
1179 * @ioa_cfg: ioa config struct
1180 * @hostrcb: hostrcb
1181 *
1182 * Return value:
1183 * none
1184 **/
1185static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001186 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187{
1188 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001189 struct ipr_config_table_entry_wrapper cfgtew;
1190 __be32 cc_res_handle;
1191
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 u32 is_ndn = 1;
1193
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001194 if (ioa_cfg->sis64) {
1195 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1196 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1197 } else {
1198 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1199 cc_res_handle = cfgtew.u.cfgte->res_handle;
1200 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201
1202 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001203 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 is_ndn = 0;
1205 break;
1206 }
1207 }
1208
1209 if (is_ndn) {
1210 if (list_empty(&ioa_cfg->free_res_q)) {
1211 ipr_send_hcam(ioa_cfg,
1212 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1213 hostrcb);
1214 return;
1215 }
1216
1217 res = list_entry(ioa_cfg->free_res_q.next,
1218 struct ipr_resource_entry, queue);
1219
1220 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001221 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1223 }
1224
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001225 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1228 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001230 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 if (ioa_cfg->allow_ml_add_del)
1232 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001233 } else {
1234 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 } else if (!res->sdev) {
1238 res->add_to_ml = 1;
1239 if (ioa_cfg->allow_ml_add_del)
1240 schedule_work(&ioa_cfg->work_q);
1241 }
1242
1243 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1244}
1245
1246/**
1247 * ipr_process_ccn - Op done function for a CCN.
1248 * @ipr_cmd: ipr command struct
1249 *
1250 * This function is the op done function for a configuration
1251 * change notification host controlled async from the adapter.
1252 *
1253 * Return value:
1254 * none
1255 **/
1256static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1257{
1258 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1259 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1260 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1261
1262 list_del(&hostrcb->queue);
1263 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1264
1265 if (ioasc) {
1266 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1267 dev_err(&ioa_cfg->pdev->dev,
1268 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1269
1270 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1271 } else {
1272 ipr_handle_config_change(ioa_cfg, hostrcb);
1273 }
1274}
1275
1276/**
Brian King8cf093e2007-04-26 16:00:14 -05001277 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1278 * @i: index into buffer
1279 * @buf: string to modify
1280 *
1281 * This function will strip all trailing whitespace, pad the end
1282 * of the string with a single space, and NULL terminate the string.
1283 *
1284 * Return value:
1285 * new length of string
1286 **/
1287static int strip_and_pad_whitespace(int i, char *buf)
1288{
1289 while (i && buf[i] == ' ')
1290 i--;
1291 buf[i+1] = ' ';
1292 buf[i+2] = '\0';
1293 return i + 2;
1294}
1295
1296/**
1297 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1298 * @prefix: string to print at start of printk
1299 * @hostrcb: hostrcb pointer
1300 * @vpd: vendor/product id/sn struct
1301 *
1302 * Return value:
1303 * none
1304 **/
1305static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1306 struct ipr_vpd *vpd)
1307{
1308 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1309 int i = 0;
1310
1311 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1312 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1313
1314 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1315 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1316
1317 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1318 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1319
1320 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1321}
1322
1323/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001325 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 *
1327 * Return value:
1328 * none
1329 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001330static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331{
1332 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1333 + IPR_SERIAL_NUM_LEN];
1334
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001335 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1336 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 IPR_PROD_ID_LEN);
1338 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1339 ipr_err("Vendor/Product ID: %s\n", buffer);
1340
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001341 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1343 ipr_err(" Serial Number: %s\n", buffer);
1344}
1345
1346/**
Brian King8cf093e2007-04-26 16:00:14 -05001347 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1348 * @prefix: string to print at start of printk
1349 * @hostrcb: hostrcb pointer
1350 * @vpd: vendor/product id/sn/wwn struct
1351 *
1352 * Return value:
1353 * none
1354 **/
1355static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1356 struct ipr_ext_vpd *vpd)
1357{
1358 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1359 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1360 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1361}
1362
1363/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001364 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1365 * @vpd: vendor/product id/sn/wwn struct
1366 *
1367 * Return value:
1368 * none
1369 **/
1370static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1371{
1372 ipr_log_vpd(&vpd->vpd);
1373 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1374 be32_to_cpu(vpd->wwid[1]));
1375}
1376
1377/**
1378 * ipr_log_enhanced_cache_error - Log a cache error.
1379 * @ioa_cfg: ioa config struct
1380 * @hostrcb: hostrcb struct
1381 *
1382 * Return value:
1383 * none
1384 **/
1385static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1386 struct ipr_hostrcb *hostrcb)
1387{
Wayne Boyer4565e372010-02-19 13:24:07 -08001388 struct ipr_hostrcb_type_12_error *error;
1389
1390 if (ioa_cfg->sis64)
1391 error = &hostrcb->hcam.u.error64.u.type_12_error;
1392 else
1393 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001394
1395 ipr_err("-----Current Configuration-----\n");
1396 ipr_err("Cache Directory Card Information:\n");
1397 ipr_log_ext_vpd(&error->ioa_vpd);
1398 ipr_err("Adapter Card Information:\n");
1399 ipr_log_ext_vpd(&error->cfc_vpd);
1400
1401 ipr_err("-----Expected Configuration-----\n");
1402 ipr_err("Cache Directory Card Information:\n");
1403 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1404 ipr_err("Adapter Card Information:\n");
1405 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1406
1407 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1408 be32_to_cpu(error->ioa_data[0]),
1409 be32_to_cpu(error->ioa_data[1]),
1410 be32_to_cpu(error->ioa_data[2]));
1411}
1412
1413/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 * ipr_log_cache_error - Log a cache error.
1415 * @ioa_cfg: ioa config struct
1416 * @hostrcb: hostrcb struct
1417 *
1418 * Return value:
1419 * none
1420 **/
1421static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1422 struct ipr_hostrcb *hostrcb)
1423{
1424 struct ipr_hostrcb_type_02_error *error =
1425 &hostrcb->hcam.u.error.u.type_02_error;
1426
1427 ipr_err("-----Current Configuration-----\n");
1428 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001429 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001431 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
1433 ipr_err("-----Expected Configuration-----\n");
1434 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001435 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001437 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438
1439 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1440 be32_to_cpu(error->ioa_data[0]),
1441 be32_to_cpu(error->ioa_data[1]),
1442 be32_to_cpu(error->ioa_data[2]));
1443}
1444
1445/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001446 * ipr_log_enhanced_config_error - Log a configuration error.
1447 * @ioa_cfg: ioa config struct
1448 * @hostrcb: hostrcb struct
1449 *
1450 * Return value:
1451 * none
1452 **/
1453static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1454 struct ipr_hostrcb *hostrcb)
1455{
1456 int errors_logged, i;
1457 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1458 struct ipr_hostrcb_type_13_error *error;
1459
1460 error = &hostrcb->hcam.u.error.u.type_13_error;
1461 errors_logged = be32_to_cpu(error->errors_logged);
1462
1463 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1464 be32_to_cpu(error->errors_detected), errors_logged);
1465
1466 dev_entry = error->dev;
1467
1468 for (i = 0; i < errors_logged; i++, dev_entry++) {
1469 ipr_err_separator;
1470
1471 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1472 ipr_log_ext_vpd(&dev_entry->vpd);
1473
1474 ipr_err("-----New Device Information-----\n");
1475 ipr_log_ext_vpd(&dev_entry->new_vpd);
1476
1477 ipr_err("Cache Directory Card Information:\n");
1478 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1479
1480 ipr_err("Adapter Card Information:\n");
1481 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1482 }
1483}
1484
1485/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001486 * ipr_log_sis64_config_error - Log a device error.
1487 * @ioa_cfg: ioa config struct
1488 * @hostrcb: hostrcb struct
1489 *
1490 * Return value:
1491 * none
1492 **/
1493static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1494 struct ipr_hostrcb *hostrcb)
1495{
1496 int errors_logged, i;
1497 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1498 struct ipr_hostrcb_type_23_error *error;
1499 char buffer[IPR_MAX_RES_PATH_LENGTH];
1500
1501 error = &hostrcb->hcam.u.error64.u.type_23_error;
1502 errors_logged = be32_to_cpu(error->errors_logged);
1503
1504 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1505 be32_to_cpu(error->errors_detected), errors_logged);
1506
1507 dev_entry = error->dev;
1508
1509 for (i = 0; i < errors_logged; i++, dev_entry++) {
1510 ipr_err_separator;
1511
1512 ipr_err("Device %d : %s", i + 1,
1513 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1514 ipr_log_ext_vpd(&dev_entry->vpd);
1515
1516 ipr_err("-----New Device Information-----\n");
1517 ipr_log_ext_vpd(&dev_entry->new_vpd);
1518
1519 ipr_err("Cache Directory Card Information:\n");
1520 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1521
1522 ipr_err("Adapter Card Information:\n");
1523 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1524 }
1525}
1526
1527/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 * ipr_log_config_error - Log a configuration error.
1529 * @ioa_cfg: ioa config struct
1530 * @hostrcb: hostrcb struct
1531 *
1532 * Return value:
1533 * none
1534 **/
1535static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1536 struct ipr_hostrcb *hostrcb)
1537{
1538 int errors_logged, i;
1539 struct ipr_hostrcb_device_data_entry *dev_entry;
1540 struct ipr_hostrcb_type_03_error *error;
1541
1542 error = &hostrcb->hcam.u.error.u.type_03_error;
1543 errors_logged = be32_to_cpu(error->errors_logged);
1544
1545 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1546 be32_to_cpu(error->errors_detected), errors_logged);
1547
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001548 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
1550 for (i = 0; i < errors_logged; i++, dev_entry++) {
1551 ipr_err_separator;
1552
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001553 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001554 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001557 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001560 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
1562 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001563 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
1565 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1566 be32_to_cpu(dev_entry->ioa_data[0]),
1567 be32_to_cpu(dev_entry->ioa_data[1]),
1568 be32_to_cpu(dev_entry->ioa_data[2]),
1569 be32_to_cpu(dev_entry->ioa_data[3]),
1570 be32_to_cpu(dev_entry->ioa_data[4]));
1571 }
1572}
1573
1574/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001575 * ipr_log_enhanced_array_error - Log an array configuration error.
1576 * @ioa_cfg: ioa config struct
1577 * @hostrcb: hostrcb struct
1578 *
1579 * Return value:
1580 * none
1581 **/
1582static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1583 struct ipr_hostrcb *hostrcb)
1584{
1585 int i, num_entries;
1586 struct ipr_hostrcb_type_14_error *error;
1587 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1588 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1589
1590 error = &hostrcb->hcam.u.error.u.type_14_error;
1591
1592 ipr_err_separator;
1593
1594 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1595 error->protection_level,
1596 ioa_cfg->host->host_no,
1597 error->last_func_vset_res_addr.bus,
1598 error->last_func_vset_res_addr.target,
1599 error->last_func_vset_res_addr.lun);
1600
1601 ipr_err_separator;
1602
1603 array_entry = error->array_member;
1604 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1605 sizeof(error->array_member));
1606
1607 for (i = 0; i < num_entries; i++, array_entry++) {
1608 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1609 continue;
1610
1611 if (be32_to_cpu(error->exposed_mode_adn) == i)
1612 ipr_err("Exposed Array Member %d:\n", i);
1613 else
1614 ipr_err("Array Member %d:\n", i);
1615
1616 ipr_log_ext_vpd(&array_entry->vpd);
1617 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1618 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1619 "Expected Location");
1620
1621 ipr_err_separator;
1622 }
1623}
1624
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Legacy (type 04/06) array decoder: logs the RAID level and last
 * functional vset address, then walks 18 array member entries that are
 * split across two arrays in the hostrcb, skipping unused slots.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	/* An all-ASCII-'0' serial number marks an unused member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	/*
	 * Entries 0-9 come from array_member, 10-17 from array_member2;
	 * the i == 9 branch at the bottom performs the switch.
	 * NOTE(review): assumes array_member holds 10 entries — confirm
	 * against the struct definition in ipr.h.
	 * NOTE(review): 'continue' below skips the pointer advance at the
	 * bottom of the loop, so a zero-SN slot leaves array_entry parked
	 * while i moves on — verify this is the intended layout semantics.
	 */
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
1679
1680/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001681 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001682 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001683 * @data: IOA error data
1684 * @len: data length
1685 *
1686 * Return value:
1687 * none
1688 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001689static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001690{
1691 int i;
1692
1693 if (len == 0)
1694 return;
1695
Brian Kingac719ab2006-11-21 10:28:42 -06001696 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1697 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1698
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001699 for (i = 0; i < len / 4; i += 4) {
1700 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1701 be32_to_cpu(data[i]),
1702 be32_to_cpu(data[i+1]),
1703 be32_to_cpu(data[i+2]),
1704 be32_to_cpu(data[i+3]));
1705 }
1706}
1707
1708/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001709 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1710 * @ioa_cfg: ioa config struct
1711 * @hostrcb: hostrcb struct
1712 *
1713 * Return value:
1714 * none
1715 **/
1716static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1717 struct ipr_hostrcb *hostrcb)
1718{
1719 struct ipr_hostrcb_type_17_error *error;
1720
Wayne Boyer4565e372010-02-19 13:24:07 -08001721 if (ioa_cfg->sis64)
1722 error = &hostrcb->hcam.u.error64.u.type_17_error;
1723 else
1724 error = &hostrcb->hcam.u.error.u.type_17_error;
1725
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001726 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001727 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001728
Brian King8cf093e2007-04-26 16:00:14 -05001729 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1730 be32_to_cpu(hostrcb->hcam.u.error.prc));
1731 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001732 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001733 be32_to_cpu(hostrcb->hcam.length) -
1734 (offsetof(struct ipr_hostrcb_error, u) +
1735 offsetof(struct ipr_hostrcb_type_17_error, data)));
1736}
1737
1738/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001739 * ipr_log_dual_ioa_error - Log a dual adapter error.
1740 * @ioa_cfg: ioa config struct
1741 * @hostrcb: hostrcb struct
1742 *
1743 * Return value:
1744 * none
1745 **/
1746static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1747 struct ipr_hostrcb *hostrcb)
1748{
1749 struct ipr_hostrcb_type_07_error *error;
1750
1751 error = &hostrcb->hcam.u.error.u.type_07_error;
1752 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001753 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001754
Brian King8cf093e2007-04-26 16:00:14 -05001755 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1756 be32_to_cpu(hostrcb->hcam.u.error.prc));
1757 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001758 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001759 be32_to_cpu(hostrcb->hcam.length) -
1760 (offsetof(struct ipr_hostrcb_error, u) +
1761 offsetof(struct ipr_hostrcb_type_07_error, data)));
1762}
1763
/* Decode table for the IPR_PATH_ACTIVE_MASK bits of a fabric path_state byte */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

/* Decode table for the IPR_PATH_STATE_MASK bits of a fabric path_state byte */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
1782
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Decodes the descriptor's path_state byte against path_active_desc and
 * path_state_desc and logs one human-readable line. Cascade/phy fields
 * equal to 0xff are omitted from the message (treated as not present).
 * If the state byte matches no table entry, the raw value is logged.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;	/* activity bits */
	u8 state = path_state & IPR_PATH_STATE_MASK;	/* health bits */

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/* Print only the cascade/phy fields that are valid (!= 0xff) */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* Unrecognized path_state: dump the raw byte */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
1831
Wayne Boyer4565e372010-02-19 13:24:07 -08001832/**
1833 * ipr_log64_fabric_path - Log a fabric path error
1834 * @hostrcb: hostrcb struct
1835 * @fabric: fabric descriptor
1836 *
1837 * Return value:
1838 * none
1839 **/
1840static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1841 struct ipr_hostrcb64_fabric_desc *fabric)
1842{
1843 int i, j;
1844 u8 path_state = fabric->path_state;
1845 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1846 u8 state = path_state & IPR_PATH_STATE_MASK;
1847 char buffer[IPR_MAX_RES_PATH_LENGTH];
1848
1849 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1850 if (path_active_desc[i].active != active)
1851 continue;
1852
1853 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1854 if (path_state_desc[j].state != state)
1855 continue;
1856
1857 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1858 path_active_desc[i].desc, path_state_desc[j].desc,
1859 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1860 return;
1861 }
1862 }
1863
1864 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1865 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1866}
1867
/* Decode table for the IPR_PATH_CFG_TYPE_MASK bits of an element's type_status */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

/* Decode table for the IPR_PATH_CFG_STATUS_MASK bits of an element's type_status */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

/* Indexed by (link_rate & IPR_PHY_LINK_RATE_MASK): 16 entries for a 4-bit field */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
1908
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Decodes the element's type_status byte against path_type_desc and
 * path_status_desc and logs one line per element. Cascade/phy fields
 * equal to 0xff are omitted from the message (treated as not present).
 * Elements of type IPR_PATH_CFG_NOT_EXIST are silently skipped.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	/* Empty element slot: nothing to log */
	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				/* IOA port elements always log their phy */
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				/* Others print only the cascade/phy fields that are valid (!= 0xff) */
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	/* Unrecognized type/status combination: log the raw element */
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
1975
1976/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001977 * ipr_log64_path_elem - Log a fabric path element.
1978 * @hostrcb: hostrcb struct
1979 * @cfg: fabric path element struct
1980 *
1981 * Return value:
1982 * none
1983 **/
1984static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
1985 struct ipr_hostrcb64_config_element *cfg)
1986{
1987 int i, j;
1988 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
1989 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1990 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1991 char buffer[IPR_MAX_RES_PATH_LENGTH];
1992
1993 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
1994 return;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 if (path_type_desc[i].type != type)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 if (path_status_desc[j].status != status)
2002 continue;
2003
2004 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2005 path_status_desc[j].desc, path_type_desc[i].desc,
2006 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2007 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 return;
2010 }
2011 }
2012 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2013 "WWN=%08X%08X\n", cfg->type_status,
2014 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2015 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2016 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2017}
2018
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Walks the variable-length fabric descriptors of a type 20 overlay,
 * logging each path and its config elements, then hex-dumps whatever
 * data remains after the last descriptor.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	/* failure_reason comes from the adapter; force NUL termination */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes of descriptor/raw data that follow the fixed header */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		/* Descriptors are variable length; step by fabric->length bytes */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Anything left after the last descriptor is dumped raw */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2055
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002056/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002057 * ipr_log_sis64_array_error - Log a sis64 array error.
2058 * @ioa_cfg: ioa config struct
2059 * @hostrcb: hostrcb struct
2060 *
2061 * Return value:
2062 * none
2063 **/
2064static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2065 struct ipr_hostrcb *hostrcb)
2066{
2067 int i, num_entries;
2068 struct ipr_hostrcb_type_24_error *error;
2069 struct ipr_hostrcb64_array_data_entry *array_entry;
2070 char buffer[IPR_MAX_RES_PATH_LENGTH];
2071 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2072
2073 error = &hostrcb->hcam.u.error64.u.type_24_error;
2074
2075 ipr_err_separator;
2076
2077 ipr_err("RAID %s Array Configuration: %s\n",
2078 error->protection_level,
2079 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2080
2081 ipr_err_separator;
2082
2083 array_entry = error->array_member;
2084 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2085 sizeof(error->array_member));
2086
2087 for (i = 0; i < num_entries; i++, array_entry++) {
2088
2089 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2090 continue;
2091
2092 if (error->exposed_mode_adn == i)
2093 ipr_err("Exposed Array Member %d:\n", i);
2094 else
2095 ipr_err("Array Member %d:\n", i);
2096
2097 ipr_err("Array Member %d:\n", i);
2098 ipr_log_ext_vpd(&array_entry->vpd);
2099 ipr_err("Current Location: %s",
2100 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2101 ipr_err("Expected Location: %s",
2102 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2103
2104 ipr_err_separator;
2105 }
2106}
2107
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Walks the variable-length fabric descriptors of a type 30 overlay,
 * logging each path and its config elements, then hex-dumps whatever
 * data remains after the last descriptor.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	/* failure_reason comes from the adapter; force NUL termination */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes of descriptor/raw data that follow the fixed header */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		/* Descriptors are variable length; step by fabric->length bytes */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Anything left after the last descriptor is dumped raw */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2145
2146/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 * ipr_log_generic_error - Log an adapter error.
2148 * @ioa_cfg: ioa config struct
2149 * @hostrcb: hostrcb struct
2150 *
2151 * Return value:
2152 * none
2153 **/
2154static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2155 struct ipr_hostrcb *hostrcb)
2156{
Brian Kingac719ab2006-11-21 10:28:42 -06002157 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002158 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159}
2160
2161/**
2162 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2163 * @ioasc: IOASC
2164 *
2165 * This function will return the index of into the ipr_error_table
2166 * for the specified IOASC. If the IOASC is not in the table,
2167 * 0 will be returned, which points to the entry used for unknown errors.
2168 *
2169 * Return value:
2170 * index into the ipr_error_table
2171 **/
2172static u32 ipr_get_error(u32 ioasc)
2173{
2174 int i;
2175
2176 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002177 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 return i;
2179
2180 return 0;
2181}
2182
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system. It looks up the
 * IOASC in ipr_error_table, reports bus resets to the SCSI midlayer
 * (sis32 only), and dispatches to the overlay-specific decoder.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	/* Only error-log HCAM notifications are handled here */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	/* The failing-device IOASC lives in a different union arm on sis64 */
	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	/* log_hcam == 0 means this IOASC is never logged */
	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Detailed overlay decode only at a sufficient log level */
	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	/* Clamp an adapter-reported length that exceeds the raw buffer */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	/* Dispatch to the decoder for this overlay format */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
2279
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 fd_ioasc;

	/* The failing-device IOASC lives in a different union arm on sis64 */
	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	/* Retire the hostrcb and return the command block to the free list */
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		/* Some errors can only be recovered by resetting the adapter */
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		/* IOA_WAS_RESET is expected during reset; anything else is reported */
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	/* Hand the HCAM buffer back to the adapter for the next error */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
2317
2318/**
2319 * ipr_timeout - An internally generated op has timed out.
2320 * @ipr_cmd: ipr command struct
2321 *
2322 * This function blocks host requests and initiates an
2323 * adapter reset.
2324 *
2325 * Return value:
2326 * none
2327 **/
2328static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2329{
2330 unsigned long lock_flags = 0;
2331 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2332
2333 ENTER;
2334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2335
2336 ioa_cfg->errors_logged++;
2337 dev_err(&ioa_cfg->pdev->dev,
2338 "Adapter being reset due to command timeout.\n");
2339
2340 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2341 ioa_cfg->sdt_state = GET_DUMP;
2342
2343 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2344 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2345
2346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2347 LEAVE;
2348}
2349
2350/**
2351 * ipr_oper_timeout - Adapter timed out transitioning to operational
2352 * @ipr_cmd: ipr command struct
2353 *
2354 * This function blocks host requests and initiates an
2355 * adapter reset.
2356 *
2357 * Return value:
2358 * none
2359 **/
2360static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2361{
2362 unsigned long lock_flags = 0;
2363 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2364
2365 ENTER;
2366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2367
2368 ioa_cfg->errors_logged++;
2369 dev_err(&ioa_cfg->pdev->dev,
2370 "Adapter timed out transitioning to operational.\n");
2371
2372 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2373 ioa_cfg->sdt_state = GET_DUMP;
2374
2375 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2376 if (ipr_fastfail)
2377 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2378 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2379 }
2380
2381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2382 LEAVE;
2383}
2384
2385/**
2386 * ipr_reset_reload - Reset/Reload the IOA
2387 * @ioa_cfg: ioa config struct
2388 * @shutdown_type: shutdown type
2389 *
2390 * This function resets the adapter and re-initializes it.
2391 * This function assumes that all new host commands have been stopped.
2392 * Return value:
2393 * SUCCESS / FAILED
2394 **/
2395static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2396 enum ipr_shutdown_type shutdown_type)
2397{
2398 if (!ioa_cfg->in_reset_reload)
2399 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2400
2401 spin_unlock_irq(ioa_cfg->host->host_lock);
2402 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2403 spin_lock_irq(ioa_cfg->host->host_lock);
2404
2405 /* If we got hit with a host reset while we were already resetting
2406 the adapter for some reason, and the reset failed. */
2407 if (ioa_cfg->ioa_is_dead) {
2408 ipr_trace;
2409 return FAILED;
2410 }
2411
2412 return SUCCESS;
2413}
2414
2415/**
2416 * ipr_find_ses_entry - Find matching SES in SES table
2417 * @res: resource entry struct of SES
2418 *
2419 * Return value:
2420 * pointer to SES table entry / NULL on failure
2421 **/
2422static const struct ipr_ses_table_entry *
2423ipr_find_ses_entry(struct ipr_resource_entry *res)
2424{
2425 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002426 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2428
2429 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2430 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2431 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002432 vpids = &res->std_inq_data.vpids;
2433 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 matches++;
2435 else
2436 break;
2437 } else
2438 matches++;
2439 }
2440
2441 if (matches == IPR_PROD_ID_LEN)
2442 return ste;
2443 }
2444
2445 return NULL;
2446}
2447
2448/**
2449 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2450 * @ioa_cfg: ioa config struct
2451 * @bus: SCSI bus
2452 * @bus_width: bus width
2453 *
2454 * Return value:
2455 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2456 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2457 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2458 * max 160MHz = max 320MB/sec).
2459 **/
2460static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2461{
2462 struct ipr_resource_entry *res;
2463 const struct ipr_ses_table_entry *ste;
2464 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2465
2466 /* Loop through each config table entry in the config table buffer */
2467 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002468 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 continue;
2470
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002471 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 continue;
2473
2474 if (!(ste = ipr_find_ses_entry(res)))
2475 continue;
2476
2477 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2478 }
2479
2480 return max_xfer_rate;
2481}
2482
2483/**
2484 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2485 * @ioa_cfg: ioa config struct
2486 * @max_delay: max delay in micro-seconds to wait
2487 *
2488 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2489 *
2490 * Return value:
2491 * 0 on success / other on failure
2492 **/
2493static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2494{
2495 volatile u32 pcii_reg;
2496 int delay = 1;
2497
2498 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2499 while (delay < max_delay) {
2500 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2501
2502 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2503 return 0;
2504
2505 /* udelay cannot be used if delay is more than a few milliseconds */
2506 if ((delay / 1000) > MAX_UDELAY_MS)
2507 mdelay(delay / 1000);
2508 else
2509 udelay(delay);
2510
2511 delay += delay;
2512 }
2513 return -EIO;
2514}
2515
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Drives the IOA's LDUMP handshake: each word is read from the adapter
 * mailbox after the IOA signals IO Debug Acknowledge, and the driver
 * acks each word back by clearing the debug-ack interrupt bit.
 *
 * NOTE(review): callers in this file (ipr_sdt_copy, ipr_get_ioa_dump)
 * invoke this with the host lock held — confirm this is a requirement.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* One handshake round trip per 32-bit word of dump data */
	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared.
	 * Timing out here is not treated as an error: we still return 0. */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
2602
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Pages are allocated lazily (GFP_ATOMIC) and tracked in
 * ioa_dump->ioa_data; on allocation failure the partial byte count
 * copied so far is returned.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	number of bytes copied (== length on full success)
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	/* Stop early if the overall dump would exceed its size cap */
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		/* Grab a fresh page when the current one is full (or first) */
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		/* Copy at most the remainder of the current page */
		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		/* Hold the host lock across the hardware transfer; bail out
		 * if the dump was aborted while we were unlocked. */
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		/* Yield between sections; the dump can be large */
		schedule();
	}

	return bytes_copied;
}
2668
2669/**
2670 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2671 * @hdr: dump entry header struct
2672 *
2673 * Return value:
2674 * nothing
2675 **/
2676static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2677{
2678 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2679 hdr->num_elems = 1;
2680 hdr->offset = sizeof(*hdr);
2681 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2682}
2683
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:		ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Records the adapter type and its microcode version (from the
 * inquiry page 3 VPD) as a binary dump entry.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	/* Pack the firmware version one byte per field:
	 * major | card type | minor[0] | minor[1] */
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
2709
2710/**
2711 * ipr_dump_version_data - Fill in the driver version in the dump.
2712 * @ioa_cfg: ioa config struct
2713 * @driver_dump: driver dump struct
2714 *
2715 * Return value:
2716 * nothing
2717 **/
2718static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2719 struct ipr_driver_dump *driver_dump)
2720{
2721 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2722 driver_dump->version_entry.hdr.len =
2723 sizeof(struct ipr_dump_version_entry) -
2724 sizeof(struct ipr_dump_entry_header);
2725 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2726 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2727 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2728 driver_dump->hdr.num_entries++;
2729}
2730
2731/**
2732 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2733 * @ioa_cfg: ioa config struct
2734 * @driver_dump: driver dump struct
2735 *
2736 * Return value:
2737 * nothing
2738 **/
2739static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2740 struct ipr_driver_dump *driver_dump)
2741{
2742 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2743 driver_dump->trace_entry.hdr.len =
2744 sizeof(struct ipr_dump_trace_entry) -
2745 sizeof(struct ipr_dump_entry_header);
2746 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2747 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2748 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2749 driver_dump->hdr.num_entries++;
2750}
2751
2752/**
2753 * ipr_dump_location_data - Fill in the IOA location in the dump.
2754 * @ioa_cfg: ioa config struct
2755 * @driver_dump: driver dump struct
2756 *
2757 * Return value:
2758 * nothing
2759 **/
2760static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2761 struct ipr_driver_dump *driver_dump)
2762{
2763 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2764 driver_dump->location_entry.hdr.len =
2765 sizeof(struct ipr_dump_location_entry) -
2766 sizeof(struct ipr_dump_entry_header);
2767 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2768 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002769 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 driver_dump->hdr.num_entries++;
2771}
2772
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Builds the driver-side dump entries (version, location, type, trace),
 * fetches the format-2 Smart Dump Table from the adapter mailbox address,
 * then copies each valid SDT section into the dump buffers. On completion
 * (or failure) sdt_state is advanced to DUMP_OBTAINED.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Nothing to do unless a dump has been requested */
	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	/* The mailbox holds the address of the adapter's dump table */
	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data.  sdt represents the pointer
	 to the ioa generated dump table.  Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	/* Drop the lock: the per-section copy sleeps/schedules */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				/* Skip (and invalidate) oversized sections */
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				/* Short copy => section failed; stop here */
				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}
2904
#else
/* Dump support not configured: compile the dump entry point to a no-op. */
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
2908
2909/**
2910 * ipr_release_dump - Free adapter dump memory
2911 * @kref: kref struct
2912 *
2913 * Return value:
2914 * nothing
2915 **/
2916static void ipr_release_dump(struct kref *kref)
2917{
2918 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2919 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2920 unsigned long lock_flags = 0;
2921 int i;
2922
2923 ENTER;
2924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2925 ioa_cfg->dump = NULL;
2926 ioa_cfg->sdt_state = INACTIVE;
2927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2928
2929 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2930 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2931
2932 kfree(dump);
2933 LEAVE;
2934}
2935
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter. It also collects a pending
 * adapter dump when sdt_state is GET_DUMP.
 *
 * Locking: the host lock is dropped around every operation that can
 * sleep (dump collection, scsi_remove_device, scsi_add_device), so the
 * resource list is re-walked from the top after each such operation.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Dump requested: take a reference so the dump cannot be freed
	 * while we copy it with the lock dropped. */
	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	/* Pass 1: remove devices flagged for deletion, one per iteration,
	 * until a full scan finds nothing left to remove. */
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				/* scsi_device_get() returns 0 on success */
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while(did_work);

	/* Pass 2: add newly discovered devices; restart both passes after
	 * each add since the lock was dropped. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Tell userspace the device configuration changed */
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
3020
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * sysfs binary-attribute read handler: copies (a window of) the
 * driver's trace buffer to userspace under the host lock.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	/* Lock out trace writers while copying the buffer */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}
3050
/* Read-only sysfs binary attribute exposing the adapter trace buffer */
static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
3060
/* Mapping between write-cache states and their sysfs string names */
static const struct {
	enum ipr_cache_state state;
	char *name;
} cache_state [] = {
	{ CACHE_NONE, "none" },
	{ CACHE_DISABLED, "disabled" },
	{ CACHE_ENABLED, "enabled" }
};
3069
3070/**
3071 * ipr_show_write_caching - Show the write caching attribute
Tony Jonesee959b02008-02-22 00:13:36 +01003072 * @dev: device struct
3073 * @buf: buffer
brking@us.ibm.com62275042005-11-01 17:01:14 -06003074 *
3075 * Return value:
3076 * number of bytes printed to buffer
3077 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003078static ssize_t ipr_show_write_caching(struct device *dev,
3079 struct device_attribute *attr, char *buf)
brking@us.ibm.com62275042005-11-01 17:01:14 -06003080{
Tony Jonesee959b02008-02-22 00:13:36 +01003081 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.com62275042005-11-01 17:01:14 -06003082 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3083 unsigned long lock_flags = 0;
3084 int i, len = 0;
3085
3086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3087 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
3088 if (cache_state[i].state == ioa_cfg->cache_state) {
3089 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
3090 break;
3091 }
3092 }
3093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3094 return len;
3095}
3096
3097
/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer ("enabled" or "disabled")
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache. Changing the
 * state triggers a normal-shutdown adapter reset and waits for the
 * reset/reload to finish before returning.
 *
 * Return value:
 * 	count on success / -EACCES, -EINVAL on failure
 **/
static ssize_t ipr_store_write_caching(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	enum ipr_cache_state new_state = CACHE_INVALID;
	int i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	/* Adapter has no write cache at all */
	if (ioa_cfg->cache_state == CACHE_NONE)
		return -EINVAL;

	/* Translate the user string into a cache state */
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
			new_state = cache_state[i].state;
			break;
		}
	}

	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
		return -EINVAL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* No change requested: succeed without resetting the adapter */
	if (ioa_cfg->cache_state == new_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	ioa_cfg->cache_state = new_state;
	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
	/* The new setting takes effect through an adapter reset */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}
3150
/* sysfs "write_cache" attribute: show/change the adapter write cache state */
static struct device_attribute ipr_ioa_cache_attr = {
	.attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
};
3159
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Prints the microcode version from the cached inquiry page 3 VPD as
 * four hex bytes: major, card type, minor[0], minor[1].
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
3185
/* Read-only sysfs "fw_version" attribute */
static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
3193
3194/**
3195 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003196 * @dev: class device struct
3197 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 *
3199 * Return value:
3200 * number of bytes printed to buffer
3201 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003202static ssize_t ipr_show_log_level(struct device *dev,
3203 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204{
Tony Jonesee959b02008-02-22 00:13:36 +01003205 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3207 unsigned long lock_flags = 0;
3208 int len;
3209
3210 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3211 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3213 return len;
3214}
3215
3216/**
3217 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003218 * @dev: class device struct
3219 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 *
3221 * Return value:
3222 * number of bytes printed to buffer
3223 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003224static ssize_t ipr_store_log_level(struct device *dev,
3225 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226 const char *buf, size_t count)
3227{
Tony Jonesee959b02008-02-22 00:13:36 +01003228 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3230 unsigned long lock_flags = 0;
3231
3232 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3233 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3234 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3235 return strlen(buf);
3236}
3237
/* sysfs "log_level" attribute: show/change the error logging level */
static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
3246
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / -EACCES, -EIO on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Wait out any reset already in progress before starting our own */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		/* Reset never started — treat as failure */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	/* Diagnostics fail if another reset started or errors were logged */
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
3299
/* sysfs host attribute "run_diagnostics": write-only trigger for an IOA reset */
static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
3307
3308/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003309 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003310 * @class_dev: device struct
3311 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003312 *
3313 * Return value:
3314 * number of bytes printed to buffer
3315 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003316static ssize_t ipr_show_adapter_state(struct device *dev,
3317 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003318{
Tony Jonesee959b02008-02-22 00:13:36 +01003319 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003320 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3321 unsigned long lock_flags = 0;
3322 int len;
3323
3324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3325 if (ioa_cfg->ioa_is_dead)
3326 len = snprintf(buf, PAGE_SIZE, "offline\n");
3327 else
3328 len = snprintf(buf, PAGE_SIZE, "online\n");
3329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3330 return len;
3331}
3332
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer ("online" brings a dead adapter back)
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Only a dead adapter written "online" is acted on; anything else is a no-op */
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		/* Clear the failure state so the reset path will retry from scratch */
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Wait (unlocked) for any reset we just kicked off to finish */
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}
3368
/* sysfs host attribute "online_state": shows adapter state, accepts "online" */
static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
3377
3378/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003380 * @dev: device struct
3381 * @buf: buffer
3382 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 *
3384 * This function will reset the adapter.
3385 *
3386 * Return value:
3387 * count on success / other on failure
3388 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003389static ssize_t ipr_store_reset_adapter(struct device *dev,
3390 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391 const char *buf, size_t count)
3392{
Tony Jonesee959b02008-02-22 00:13:36 +01003393 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3395 unsigned long lock_flags;
3396 int result = count;
3397
3398 if (!capable(CAP_SYS_ADMIN))
3399 return -EACCES;
3400
3401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3402 if (!ioa_cfg->in_reset_reload)
3403 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3404 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3405 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3406
3407 return result;
3408}
3409
/* sysfs host attribute "reset_host": write-only trigger for an adapter reset */
static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
3417
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element (rounded up to a page-order size) */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed (round up) */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/*
	 * Allocate a scatter/gather list for the DMA.
	 * NOTE: the (num_elem - 1) accounts for the one scatterlist entry
	 * already embedded in struct ipr_sglist (declared elsewhere).
	 */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
3484
3485/**
3486 * ipr_free_ucode_buffer - Frees a microcode download buffer
3487 * @p_dnld: scatter/gather list pointer
3488 *
3489 * Free a DMA'able ucode download buffer previously allocated with
3490 * ipr_alloc_ucode_buffer
3491 *
3492 * Return value:
3493 * nothing
3494 **/
3495static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3496{
3497 int i;
3498
3499 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003500 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
3502 kfree(sglist);
3503}
3504
3505/**
3506 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3507 * @sglist: scatter/gather list pointer
3508 * @buffer: buffer pointer
3509 * @len: buffer length
3510 *
3511 * Copy a microcode image from a user buffer into a buffer allocated by
3512 * ipr_alloc_ucode_buffer
3513 *
3514 * Return value:
3515 * 0 on success / other on failure
3516 **/
3517static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3518 u8 *buffer, u32 len)
3519{
3520 int bsize_elem, i, result = 0;
3521 struct scatterlist *scatterlist;
3522 void *kaddr;
3523
3524 /* Determine the actual number of bytes per element */
3525 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3526
3527 scatterlist = sglist->scatterlist;
3528
3529 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003530 struct page *page = sg_page(&scatterlist[i]);
3531
3532 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003534 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
3536 scatterlist[i].length = bsize_elem;
3537
3538 if (result != 0) {
3539 ipr_trace;
3540 return result;
3541 }
3542 }
3543
3544 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003545 struct page *page = sg_page(&scatterlist[i]);
3546
3547 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003549 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550
3551 scatterlist[i].length = len % bsize_elem;
3552 }
3553
3554 sglist->buffer_len = len;
3555 return result;
3556}
3557
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL) using 64-bit
 * descriptors. All descriptor fields are written big-endian, as the
 * adapter hardware expects.
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	/* One descriptor per DMA-mapped sg entry; this is a write to the IOA */
	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	/* Mark the final descriptor so the adapter knows where the list ends */
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
3588
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL) using 32-bit
 * descriptors (flags and length share one big-endian word).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	/* One descriptor per DMA-mapped sg entry; this is a write to the IOA */
	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		/* Flags and length are packed into a single big-endian word */
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	/* Mark the final descriptor so the adapter knows where the list ends */
	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
3622
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/*
	 * Wait out any reset already in flight; the lock must be dropped
	 * around wait_event() and the flag re-checked afterwards.
	 */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Only one download may be staged at a time */
	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	/* DMA-map the staged image for the adapter to read */
	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	/*
	 * Publish the sglist and reset; the reset path performs the actual
	 * download. Wait (unlocked) for the reset to finish, then unstage.
	 */
	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
3672
3673/**
3674 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003675 * @class_dev: device struct
3676 * @buf: buffer
3677 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678 *
3679 * This function will update the firmware on the adapter.
3680 *
3681 * Return value:
3682 * count on success / other on failure
3683 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003684static ssize_t ipr_store_update_fw(struct device *dev,
3685 struct device_attribute *attr,
3686 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687{
Tony Jonesee959b02008-02-22 00:13:36 +01003688 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3690 struct ipr_ucode_image_header *image_hdr;
3691 const struct firmware *fw_entry;
3692 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 char fname[100];
3694 char *src;
3695 int len, result, dnld_size;
3696
3697 if (!capable(CAP_SYS_ADMIN))
3698 return -EACCES;
3699
3700 len = snprintf(fname, 99, "%s", buf);
3701 fname[len-1] = '\0';
3702
3703 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3704 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3705 return -EIO;
3706 }
3707
3708 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3709
3710 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3711 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3712 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3713 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3714 release_firmware(fw_entry);
3715 return -EINVAL;
3716 }
3717
3718 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3719 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3720 sglist = ipr_alloc_ucode_buffer(dnld_size);
3721
3722 if (!sglist) {
3723 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3724 release_firmware(fw_entry);
3725 return -ENOMEM;
3726 }
3727
3728 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3729
3730 if (result) {
3731 dev_err(&ioa_cfg->pdev->dev,
3732 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003733 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 }
3735
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003736 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003738 if (!result)
3739 result = count;
3740out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 ipr_free_ucode_buffer(sglist);
3742 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003743 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744}
3745
/* sysfs host attribute "update_fw": write a firmware file name to flash it */
static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
3753
/*
 * Host (IOA) sysfs attributes registered with the SCSI midlayer.
 * ipr_fw_version_attr and ipr_ioa_cache_attr are defined elsewhere
 * in this file.
 */
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_cache_attr,
	NULL,
};
3764
3765#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Copies up to @count bytes of a previously obtained adapter dump into
 * @buf, starting at @off. The dump image is laid out as three regions
 * in order: the driver dump header, the IOA dump header, then the
 * page-array of IOA data; @off is rebased as each region is consumed.
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	/* Nothing to read unless a complete dump has been captured */
	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	/* Pin the dump so it cannot be freed while we copy from it */
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	/* Clamp the request to the end of the dump image */
	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	/* Region 1: the driver dump header structure */
	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	/* Rebase off so 0 is now the start of the IOA dump region */
	off -= sizeof(dump->driver_dump);

	/* Region 2: the IOA dump header, up to the ioa_data page array */
	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	/* Rebase off so 0 is now the start of the IOA data pages */
	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	/* Region 3: IOA data, stored as an array of pages; copy page by page */
	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
3857
3858/**
3859 * ipr_alloc_dump - Prepare for adapter dump
3860 * @ioa_cfg: ioa config struct
3861 *
3862 * Return value:
3863 * 0 on success / other on failure
3864 **/
3865static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3866{
3867 struct ipr_dump *dump;
3868 unsigned long lock_flags = 0;
3869
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003870 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
3872 if (!dump) {
3873 ipr_err("Dump memory allocation failed\n");
3874 return -ENOMEM;
3875 }
3876
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 kref_init(&dump->kref);
3878 dump->ioa_cfg = ioa_cfg;
3879
3880 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3881
3882 if (INACTIVE != ioa_cfg->sdt_state) {
3883 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3884 kfree(dump);
3885 return 0;
3886 }
3887
3888 ioa_cfg->dump = dump;
3889 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3890 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3891 ioa_cfg->dump_taken = 1;
3892 schedule_work(&ioa_cfg->work_q);
3893 }
3894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3895
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 return 0;
3897}
3898
3899/**
3900 * ipr_free_dump - Free adapter dump memory
3901 * @ioa_cfg: ioa config struct
3902 *
3903 * Return value:
3904 * 0 on success / other on failure
3905 **/
3906static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3907{
3908 struct ipr_dump *dump;
3909 unsigned long lock_flags = 0;
3910
3911 ENTER;
3912
3913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3914 dump = ioa_cfg->dump;
3915 if (!dump) {
3916 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3917 return 0;
3918 }
3919
3920 ioa_cfg->dump = NULL;
3921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3922
3923 kref_put(&dump->kref, ipr_release_dump);
3924
3925 LEAVE;
3926 return 0;
3927}
3928
3929/**
3930 * ipr_write_dump - Setup dump state of adapter
3931 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003932 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 * @buf: buffer
3934 * @off: offset
3935 * @count: buffer size
3936 *
3937 * Return value:
3938 * number of bytes printed to buffer
3939 **/
Zhang Rui91a69022007-06-09 13:57:22 +08003940static ssize_t ipr_write_dump(struct kobject *kobj,
3941 struct bin_attribute *bin_attr,
3942 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943{
Tony Jonesee959b02008-02-22 00:13:36 +01003944 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 struct Scsi_Host *shost = class_to_shost(cdev);
3946 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3947 int rc;
3948
3949 if (!capable(CAP_SYS_ADMIN))
3950 return -EACCES;
3951
3952 if (buf[0] == '1')
3953 rc = ipr_alloc_dump(ioa_cfg);
3954 else if (buf[0] == '0')
3955 rc = ipr_free_dump(ioa_cfg);
3956 else
3957 return -EINVAL;
3958
3959 if (rc)
3960 return rc;
3961 else
3962 return count;
3963}
3964
/* sysfs binary attribute "dump": read the captured dump, write '1'/'0' to arm/free */
static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
3974#else
/* Stub used when CONFIG_SCSI_IPR_DUMP is not set (stray trailing ';' removed) */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3976#endif
3977
3978/**
3979 * ipr_change_queue_depth - Change the device's queue depth
3980 * @sdev: scsi device struct
3981 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07003982 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983 *
3984 * Return value:
3985 * actual depth set
3986 **/
Mike Christiee881a172009-10-15 17:46:39 -07003987static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3988 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989{
Brian King35a39692006-09-25 12:39:20 -05003990 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3991 struct ipr_resource_entry *res;
3992 unsigned long lock_flags = 0;
3993
Mike Christiee881a172009-10-15 17:46:39 -07003994 if (reason != SCSI_QDEPTH_DEFAULT)
3995 return -EOPNOTSUPP;
3996
Brian King35a39692006-09-25 12:39:20 -05003997 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3998 res = (struct ipr_resource_entry *)sdev->hostdata;
3999
4000 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4001 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4003
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4005 return sdev->queue_depth;
4006}
4007
4008/**
4009 * ipr_change_queue_type - Change the device's queue type
4010 * @dsev: scsi device struct
4011 * @tag_type: type of tags to use
4012 *
4013 * Return value:
4014 * actual queue type set
4015 **/
4016static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4017{
4018 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4019 struct ipr_resource_entry *res;
4020 unsigned long lock_flags = 0;
4021
4022 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4023 res = (struct ipr_resource_entry *)sdev->hostdata;
4024
4025 if (res) {
4026 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4027 /*
4028 * We don't bother quiescing the device here since the
4029 * adapter firmware does it for us.
4030 */
4031 scsi_set_tag_type(sdev, tag_type);
4032
4033 if (tag_type)
4034 scsi_activate_tcq(sdev, sdev->queue_depth);
4035 else
4036 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4037 } else
4038 tag_type = 0;
4039 } else
4040 tag_type = 0;
4041
4042 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4043 return tag_type;
4044}
4045
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer / -ENXIO if no resource is bound
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		/*
		 * NOTE(review): res_handle is printed raw here. If it is a
		 * big-endian (__be32) field, the value shown will be
		 * byte-swapped on little-endian hosts - confirm the intended
		 * representation against ipr.h.
		 */
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
4069
/* per-device sysfs attribute "adapter_handle": read-only resource handle */
static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
4077
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004078/**
4079 * ipr_show_resource_path - Show the resource path for this device.
4080 * @dev: device struct
4081 * @buf: buffer
4082 *
4083 * Return value:
4084 * number of bytes printed to buffer
4085 **/
4086static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4087{
4088 struct scsi_device *sdev = to_scsi_device(dev);
4089 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4090 struct ipr_resource_entry *res;
4091 unsigned long lock_flags = 0;
4092 ssize_t len = -ENXIO;
4093 char buffer[IPR_MAX_RES_PATH_LENGTH];
4094
4095 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4096 res = (struct ipr_resource_entry *)sdev->hostdata;
4097 if (res)
4098 len = snprintf(buf, PAGE_SIZE, "%s\n",
4099 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4100 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4101 return len;
4102}
4103
/* per-device sysfs attribute "resource_path": read-only formatted path */
static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};
4111
/* Per-device (scsi_device) sysfs attributes registered with the midlayer */
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	NULL,
};
4117
4118/**
4119 * ipr_biosparam - Return the HSC mapping
4120 * @sdev: scsi device struct
4121 * @block_device: block device pointer
4122 * @capacity: capacity of the device
4123 * @parm: Array containing returned HSC values.
4124 *
4125 * This function generates the HSC parms that fdisk uses.
4126 * We want to make sure we return something that places partitions
4127 * on 4k boundaries for best performance with the IOA.
4128 *
4129 * Return value:
4130 * 0 on success
4131 **/
4132static int ipr_biosparam(struct scsi_device *sdev,
4133 struct block_device *block_device,
4134 sector_t capacity, int *parm)
4135{
4136 int heads, sectors;
4137 sector_t cylinders;
4138
4139 heads = 128;
4140 sectors = 32;
4141
4142 cylinders = capacity;
4143 sector_div(cylinders, (128 * 32));
4144
4145 /* return result */
4146 parm[0] = heads;
4147 parm[1] = sectors;
4148 parm[2] = cylinders;
4149
4150 return 0;
4151}
4152
4153/**
Brian King35a39692006-09-25 12:39:20 -05004154 * ipr_find_starget - Find target based on bus/target.
4155 * @starget: scsi target struct
4156 *
4157 * Return value:
4158 * resource entry pointer if found / NULL if not found
4159 **/
4160static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4161{
4162 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4163 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4164 struct ipr_resource_entry *res;
4165
4166 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004167 if ((res->bus == starget->channel) &&
4168 (res->target == starget->id) &&
4169 (res->lun == 0)) {
Brian King35a39692006-09-25 12:39:20 -05004170 return res;
4171 }
4172 }
4173
4174 return NULL;
4175}
4176
4177static struct ata_port_info sata_port_info;
4178
4179/**
4180 * ipr_target_alloc - Prepare for commands to a SCSI target
4181 * @starget: scsi target struct
4182 *
4183 * If the device is a SATA device, this function allocates an
4184 * ATA port with libata, else it does nothing.
4185 *
4186 * Return value:
4187 * 0 on success / non-0 on failure
4188 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		/* Drop the host lock: kzalloc(GFP_KERNEL) and the libata
		 * port allocation below may sleep. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		/* NOTE(review): res is dereferenced after the lock was
		 * dropped and re-taken; assumes the resource entry cannot
		 * go away here -- confirm against config-change handling. */
		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			/* Re-take the lock before publishing the links
			 * between resource, ata_port and scsi_target. */
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			/* Lock is not held on this path; return directly. */
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
4227
4228/**
4229 * ipr_target_destroy - Destroy a SCSI target
4230 * @starget: scsi target struct
4231 *
4232 * If the device was a SATA device, this function frees the libata
4233 * ATA port, else it does nothing.
4234 *
4235 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		/* On SIS64 adapters, give back the target id that was
		 * reserved in the per-bus id bitmap for this target. */
		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->array_ids);
		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
			clear_bit(starget->id, ioa_cfg->vset_ids);
		else if (starget->channel == 0)
			clear_bit(starget->id, ioa_cfg->target_ids);
	}

	if (sata_port) {
		/* Tear down the libata port set up by ipr_target_alloc(). */
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
4257
4258/**
4259 * ipr_find_sdev - Find device based on bus/target/lun.
4260 * @sdev: scsi device struct
4261 *
4262 * Return value:
4263 * resource entry pointer if found / NULL if not found
4264 **/
4265static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4266{
4267 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4268 struct ipr_resource_entry *res;
4269
4270 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004271 if ((res->bus == sdev->channel) &&
4272 (res->target == sdev->id) &&
4273 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004274 return res;
4275 }
4276
4277 return NULL;
4278}
4279
4280/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281 * ipr_slave_destroy - Unconfigure a SCSI device
4282 * @sdev: scsi device struct
4283 *
4284 * Return value:
4285 * nothing
4286 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		/* If this was a SATA device, quiesce its ATA port first. */
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		/* Break the sdev <-> resource-entry links in both
		 * directions. */
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
4306
4307/**
4308 * ipr_slave_configure - Configure a SCSI device
4309 * @sdev: scsi device struct
4310 *
4311 * This function configures the specified scsi device.
4312 *
4313 * Return value:
4314 * 0 on success
4315 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			/* Advanced-function disks and the IOA resource are
			 * driver-managed: keep upper-layer drivers (sd, st,
			 * ...) from binding to them. */
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			/* RAID volume sets get a longer request timeout and
			 * a larger maximum transfer size. */
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		/* Lock dropped here; the libata/midlayer calls below may
		 * not be made under the host lock. */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
4357
4358/**
Brian King35a39692006-09-25 12:39:20 -05004359 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4360 * @sdev: scsi device struct
4361 *
4362 * This function initializes an ATA port so that future commands
4363 * sent through queuecommand will work.
4364 *
4365 * Return value:
4366 * 0 on success
4367 **/
4368static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4369{
4370 struct ipr_sata_port *sata_port = NULL;
4371 int rc = -ENXIO;
4372
4373 ENTER;
4374 if (sdev->sdev_target)
4375 sata_port = sdev->sdev_target->hostdata;
4376 if (sata_port)
4377 rc = ata_sas_port_init(sata_port->ap);
4378 if (rc)
4379 ipr_slave_destroy(sdev);
4380
4381 LEAVE;
4382 return rc;
4383}
4384
4385/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386 * ipr_slave_alloc - Prepare for commands to a device.
4387 * @sdev: scsi device struct
4388 *
4389 * This function saves a pointer to the resource entry
4390 * in the scsi device struct if the device exists. We
4391 * can then use this pointer in ipr_queuecommand when
4392 * handling new commands.
4393 *
4394 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004395 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		/* Cross-link the resource entry and the sdev so
		 * ipr_queuecommand can find the resource quickly. */
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			/* SATA devices also need their libata port
			 * initialized; drop the lock before calling into
			 * libata and return its result directly. */
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
4427
4428/**
4429 * ipr_eh_host_reset - Reset the host adapter
4430 * @scsi_cmd: scsi command struct
4431 *
4432 * Return value:
4433 * SUCCESS / FAILED
4434 **/
Jeff Garzik df0ae242005-05-28 07:57:14 -04004435static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436{
4437 struct ipr_ioa_cfg *ioa_cfg;
4438 int rc;
4439
4440 ENTER;
4441 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4442
4443 dev_err(&ioa_cfg->pdev->dev,
4444 "Adapter being reset as a result of error recovery.\n");
4445
4446 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4447 ioa_cfg->sdt_state = GET_DUMP;
4448
4449 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4450
4451 LEAVE;
4452 return rc;
4453}
4454
Jeff Garzik df0ae242005-05-28 07:57:14 -04004455static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4456{
4457 int rc;
4458
4459 spin_lock_irq(cmd->device->host->host_lock);
4460 rc = __ipr_eh_host_reset(cmd);
4461 spin_unlock_irq(cmd->device->host->host_lock);
4462
4463 return rc;
4464}
4465
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466/**
Brian Kingc6513092006-03-29 09:37:43 -06004467 * ipr_device_reset - Reset the device
4468 * @ioa_cfg: ioa config struct
4469 * @res: resource entry struct
4470 *
4471 * This function issues a device reset to the affected device.
4472 * If the device is a SCSI device, a LUN reset will be sent
4473 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004474 * will be sent. If the device is a SATA device, a PHY reset will
4475 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004476 *
4477 * Return value:
4478 * 0 on success / non-zero on failure
4479 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	/* The ATA register block lives at a different location on SIS64
	 * adapters, and its offset must be advertised in the IOARCB. */
	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		/* SATA devices get a PHY reset rather than a LUN/target
		 * reset, and want ATA status back even on success. */
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	/* Preserve the ATA status for libata, unless the whole IOA was
	 * reset (in which case the IOASA contents are not meaningful). */
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
4519
4520/**
Brian King35a39692006-09-25 12:39:20 -05004521 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004522 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004523 * @classes: class of the attached device
4524 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004525 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004526 *
4527 * Return value:
4528 * 0 on success / non-zero on failure
4529 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* If an adapter reset/reload is in flight, sleep until it finishes
	 * before issuing the PHY reset (drop the lock while waiting). */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		/* Report the device class recorded for this resource so
		 * libata can revalidate appropriately. */
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
4557
4558/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 * ipr_eh_dev_reset - Reset the device
4560 * @scsi_cmd: scsi command struct
4561 *
4562 * This function issues a device reset to the affected device.
4563 * A LUN reset will be sent to the device first. If that does
4564 * not work, a target reset will be sent.
4565 *
4566 * Return value:
4567 * SUCCESS / FAILED
4568 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	/* Re-route completion of every command outstanding to this device
	 * through the eh done handlers, and mark any ATA qc as failed so
	 * libata's EH picks it up. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		/* SATA: run libata's standard error handler (which lands in
		 * ipr_sata_reset); the host lock must be dropped around it. */
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		/* Anything still pending for this device means the reset did
		 * not clean up - report failure to the midlayer. */
		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}
4629
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004630static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4631{
4632 int rc;
4633
4634 spin_lock_irq(cmd->device->host->host_lock);
4635 rc = __ipr_eh_dev_reset(cmd);
4636 spin_unlock_irq(cmd->device->host->host_lock);
4637
4638 return rc;
4639}
4640
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641/**
4642 * ipr_bus_reset_done - Op done function for bus reset.
4643 * @ipr_cmd: ipr command struct
4644 *
4645 * This function is the op done function for a bus reset
4646 *
4647 * Return value:
4648 * none
4649 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	/* On non-SIS64 adapters, tell the midlayer which bus was reset so
	 * it can retry/requeue the commands affected by it. */
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}
4676
4677/**
4678 * ipr_abort_timeout - An abort task has timed out
4679 * @ipr_cmd: ipr command struct
4680 *
4681 * This function handles when an abort task times out. If this
4682 * happens we issue a bus reset since we have resources tied
4683 * up that must be freed before returning to the midlayer.
4684 *
4685 * Return value:
4686 * none
4687 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Nothing to do if the abort already completed, or if an adapter
	 * reset is in progress (that will clean everything up anyway). */
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	/* Cross-link the bus-reset command and the stuck abort as siblings
	 * so ipr_bus_reset_done() knows how to wake the eh thread. */
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
4716
4717/**
4718 * ipr_cancel_op - Cancel specified op
4719 * @scsi_cmd: scsi command struct
4720 *
4721 * This function cancels specified op.
4722 *
4723 * Return value:
4724 * SUCCESS / FAILED
4725 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	/* Cancel is only issued for generic SCSI resources. */
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	/* Re-route the victim command's completion through the eh done
	 * handler if it is still outstanding. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	/* The op already completed - nothing to cancel. */
	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
4787
4788/**
4789 * ipr_eh_abort - Abort a single op
4790 * @scsi_cmd: scsi command struct
4791 *
4792 * Return value:
4793 * SUCCESS / FAILED
4794 **/
4795static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4796{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004797 unsigned long flags;
4798 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799
4800 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004802 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4803 rc = ipr_cancel_op(scsi_cmd);
4804 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805
4806 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004807 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808}
4809
4810/**
4811 * ipr_handle_other_interrupt - Handle "other" interrupts
4812 * @ioa_cfg: ioa config struct
4813 * @int_reg: interrupt register
4814 *
4815 * Return value:
4816 * IRQ_NONE / IRQ_HANDLED
4817 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* IOA transitioned to operational: mask the interrupt... */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* ...clear it, and read back to flush the write... */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		/* ...then continue the in-flight reset job immediately. */
		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		/* Any other "other" interrupt is a fatal adapter error:
		 * record the cause, grab a dump if one is wanted, and
		 * reset the adapter. */
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
4850
4851/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004852 * ipr_isr_eh - Interrupt service routine error handler
4853 * @ioa_cfg: ioa config struct
4854 * @msg: message to log
4855 *
4856 * Return value:
4857 * none
4858 **/
4859static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4860{
4861 ioa_cfg->errors_logged++;
4862 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4863
4864 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4865 ioa_cfg->sdt_state = GET_DUMP;
4866
4867 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4868}
4869
4870/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871 * ipr_isr - Interrupt service routine
4872 * @irq: irq number
4873 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 *
4875 * Return value:
4876 * IRQ_NONE / IRQ_HANDLED
4877 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		/* Drain the host request/response queue: an entry whose
		 * toggle bit matches ours is a new response. */
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			/* A handle outside the command-block table means the
			 * adapter is confused - reset it. */
			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			/* Advance the HRRQ cursor, wrapping to the start and
			 * flipping the toggle bit at the end of the ring. */
			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt, retrying a bounded number
			 * of times in case it does not latch clear. */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			/* Still set after all retries: reset the adapter. */
			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	/* Nothing completed - must be an "other" (error/transition) interrupt. */
	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
4965
4966/**
Wayne Boyera32c0552010-02-19 13:23:36 -08004967 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968 * @ioa_cfg: ioa config struct
4969 * @ipr_cmd: ipr command struct
4970 *
4971 * Return value:
4972 * 0 on success / -1 on failure
4973 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	/* No data transfer: nothing to map. */
	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	/* NOTE(review): unlike ipr_build_ioadl(), this 64-bit variant does
	 * not fill in the IOARCB transfer-length/ioadl-length fields here;
	 * presumably the sis64 queuecommand path sets them - confirm. */
	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	/* Flag the final descriptor so the adapter knows the list ends. */
	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5012
5013/**
5014 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5015 * @ioa_cfg: ioa config struct
5016 * @ipr_cmd: ipr command struct
5017 *
5018 * Return value:
5019 * 0 on success / -1 on failure
5020 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	/* No data transfer: nothing to map. */
	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	/* Record the direction, total length, and descriptor-list length
	 * in the IOARCB (write and read use separate fields). */
	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* If the s/g list fits in the IOADL space embedded in the IOARCB,
	 * use that and point the adapter at it (saves a separate fetch). */
	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
						       offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	/* Flag the final descriptor so the adapter knows the list ends. */
	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5073
5074/**
5075 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5076 * @scsi_cmd: scsi command struct
5077 *
5078 * Return value:
5079 * task attributes
5080 **/
5081static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5082{
5083 u8 tag[2];
5084 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5085
5086 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5087 switch (tag[0]) {
5088 case MSG_SIMPLE_TAG:
5089 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5090 break;
5091 case MSG_HEAD_TAG:
5092 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5093 break;
5094 case MSG_ORDERED_TAG:
5095 rc = IPR_FLAGS_LO_ORDERED_TASK;
5096 break;
5097 };
5098 }
5099
5100 return rc;
5101}
5102
5103/**
5104 * ipr_erp_done - Process completion of ERP for a device
5105 * @ipr_cmd: ipr command struct
5106 *
5107 * This function copies the sense buffer into the scsi_cmd
5108 * struct and pushes the scsi_done function.
5109 *
5110 * Return value:
5111 * nothing
5112 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		/* The request sense itself failed; report a host error */
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		/* Hand the gathered sense data back to the mid-layer */
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;	/* ERP for this device is finished */
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5138
5139/**
5140 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5141 * @ipr_cmd: ipr command struct
5142 *
5143 * Return value:
5144 * none
5145 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	/* Clear the command packet and all transfer bookkeeping so the
	 * block can be reissued with a fresh ERP command. */
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	/* Re-point the IOARCB at this command's own IOADL area; the
	 * field layout differs between SIS-64 and older adapters. */
	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
5169
5170/**
5171 * ipr_erp_request_sense - Send request sense to a device
5172 * @ipr_cmd: ipr command struct
5173 *
5174 * This function sends a request sense to a device as a result
5175 * of a check condition.
5176 *
5177 * Return value:
5178 * nothing
5179 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* The preceding ERP step failed with a sense key of its own:
	 * stop escalating and complete the command. */
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	/* Reuse this command block for the REQUEST SENSE CDB */
	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	/* Sense data DMAs directly into the command's sense buffer */
	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
5205
5206/**
5207 * ipr_erp_cancel_all - Send cancel all to a device
5208 * @ipr_cmd: ipr command struct
5209 *
5210 * This function sends a cancel all to a device to clear the
5211 * queue. If we are running TCQ on the device, QERR is set to 1,
5212 * which means all outstanding ops have been dropped on the floor.
5213 * Cancel all will return them to us.
5214 *
5215 * Return value:
5216 * nothing
5217 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;	/* cleared again in ipr_erp_done() */

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	/* Untagged devices have no queued ops to reclaim: go straight
	 * to the request sense step. */
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	/* Continue ERP with a request sense once the cancel completes */
	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
5240
5241/**
5242 * ipr_dump_ioasa - Dump contents of IOASA
5243 * @ioa_cfg: ioa config struct
5244 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005245 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 *
5247 * This function is invoked by the interrupt handler when ops
5248 * fail. It will log the IOASA if appropriate. Only called
5249 * for GPDD ops.
5250 *
5251 * Return value:
5252 * none
5253 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;	/* raw word view for the hex dump */
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	/* For a bus reset, the failing-device IOASC is the more
	 * specific error to look up. */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	/* Clamp the dump length to the size of the IOASA structure */
	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	/* Dump four 32-bit words per line; i counts words */
	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
5307
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd: ipr command struct
 *
 * Builds sense data in the scsi_cmd's sense buffer from the IOASC
 * returned by the adapter: descriptor format (0x72) when a vset
 * device reports a 64-bit failing LBA, fixed format (0x70) otherwise.
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* Driver-generated IOASCs carry no device sense information */
	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		/* Failing LBA needs more than 32 bits: descriptor format */
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;	/* additional sense length */
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		/* High 32 bits of the failing LBA, big-endian */
		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		/* Low 32 bits of the failing LBA */
		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		/* Fixed format sense data */
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			/* Field pointer from the adapter's ioasc_specific */
			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
5398
5399/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005400 * ipr_get_autosense - Copy autosense data to sense buffer
5401 * @ipr_cmd: ipr command struct
5402 *
5403 * This function copies the autosense buffer to the buffer
5404 * in the scsi_cmd, if there is autosense available.
5405 *
5406 * Return value:
5407 * 1 if autosense was available / 0 if not
5408 **/
5409static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5410{
5411 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5412
Brian King117d2ce2006-08-02 14:57:58 -05005413 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005414 return 0;
5415
5416 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5417 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5418 SCSI_SENSE_BUFFERSIZE));
5419 return 1;
5420}
5421
5422/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 * ipr_erp_start - Process an error response for a SCSI op
5424 * @ioa_cfg: ioa config struct
5425 * @ipr_cmd: ipr command struct
5426 *
5427 * This function determines whether or not to initiate ERP
5428 * on the affected device.
5429 *
5430 * Return value:
5431 * nothing
5432 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	/* Device is gone; let the EH completion path finish the command */
	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	/* For non-GSCSI devices, synthesize sense data from the IOASC */
	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	/* Map the adapter's IOASC onto a mid-layer result / ERP action */
	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			/* Without autosense we must fetch the sense data
			 * ourselves; cancel all then request sense. */
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5515
5516/**
5517 * ipr_scsi_done - mid-layer done function
5518 * @ipr_cmd: ipr command struct
5519 *
5520 * This function is invoked by the interrupt handler for
5521 * ops generated by the SCSI mid-layer
5522 *
5523 * Return value:
5524 * none
5525 **/
5526static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5527{
5528 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5529 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5530 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5531
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005532 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533
5534 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005535 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5537 scsi_cmd->scsi_done(scsi_cmd);
5538 } else
5539 ipr_erp_start(ioa_cfg, ipr_cmd);
5540}
5541
5542/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005543 * ipr_queuecommand - Queue a mid-layer request
5544 * @scsi_cmd: scsi command struct
5545 * @done: done function
5546 *
5547 * This function queues a request generated by the mid-layer.
5548 *
5549 * Return value:
5550 * 0 on success
5551 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5552 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5553 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	/* SATA devices are routed through libata instead */
	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		/* One-shot flag set by the ERP path; consume it here */
		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	/* Vendor-specific CDB opcodes (>= 0xC0) go to the IOA itself */
	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	/* Build the scatter/gather list in the adapter's format */
	if (likely(rc == 0)) {
		if (ioa_cfg->sis64)
			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		else
			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
	}

	if (likely(rc == 0)) {
		mb();	/* descriptors must be visible before the doorbell */
		ipr_send_command(ipr_cmd);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
5636
5637/**
Brian King35a39692006-09-25 12:39:20 -05005638 * ipr_ioctl - IOCTL handler
5639 * @sdev: scsi device struct
5640 * @cmd: IOCTL cmd
5641 * @arg: IOCTL arg
5642 *
5643 * Return value:
5644 * 0 on success / other on failure
5645 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06005646static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05005647{
5648 struct ipr_resource_entry *res;
5649
5650 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05005651 if (res && ipr_is_gata(res)) {
5652 if (cmd == HDIO_GET_IDENTITY)
5653 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05005654 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05005655 }
Brian King35a39692006-09-25 12:39:20 -05005656
5657 return -EINVAL;
5658}
5659
5660/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661 * ipr_info - Get information about the card/driver
5662 * @scsi_host: scsi host struct
5663 *
5664 * Return value:
5665 * pointer to buffer with description string
5666 **/
5667static const char * ipr_ioa_info(struct Scsi_Host *host)
5668{
5669 static char buffer[512];
5670 struct ipr_ioa_cfg *ioa_cfg;
5671 unsigned long lock_flags = 0;
5672
5673 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5674
5675 spin_lock_irqsave(host->host_lock, lock_flags);
5676 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5677 spin_unlock_irqrestore(host->host_lock, lock_flags);
5678
5679 return buffer;
5680}
5681
/* SCSI mid-layer host template: entry points and queueing limits for
 * hosts registered by this driver. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
5709
Brian King35a39692006-09-25 12:39:20 -05005710/**
5711 * ipr_ata_phy_reset - libata phy_reset handler
5712 * @ap: ata port to reset
5713 *
5714 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Wait out any in-flight adapter reset; the wait drops the lock */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ata_port_disable(ap);
		goto out_unlock;
	}

	/* Publish the device class discovered for this resource */
	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ata_port_disable(ap);

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
5749
5750/**
5751 * ipr_ata_post_internal - Cleanup after an internal command
5752 * @qc: ATA queued command
5753 *
5754 * Return value:
5755 * none
5756 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Wait out any in-flight adapter reset; the wait drops the lock */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	/* If the internal command is still pending, reset the device
	 * to reclaim it. */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
5779
5780/**
Brian King35a39692006-09-25 12:39:20 -05005781 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5782 * @regs: destination
5783 * @tf: source ATA taskfile
5784 *
5785 * Return value:
5786 * none
5787 **/
5788static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5789 struct ata_taskfile *tf)
5790{
5791 regs->feature = tf->feature;
5792 regs->nsect = tf->nsect;
5793 regs->lbal = tf->lbal;
5794 regs->lbam = tf->lbam;
5795 regs->lbah = tf->lbah;
5796 regs->device = tf->device;
5797 regs->command = tf->command;
5798 regs->hob_feature = tf->hob_feature;
5799 regs->hob_nsect = tf->hob_nsect;
5800 regs->hob_lbal = tf->hob_lbal;
5801 regs->hob_lbam = tf->hob_lbam;
5802 regs->hob_lbah = tf->hob_lbah;
5803 regs->ctl = tf->ctl;
5804}
5805
5806/**
5807 * ipr_sata_done - done function for SATA commands
5808 * @ipr_cmd: ipr command struct
5809 *
5810 * This function is invoked by the interrupt handler for
5811 * ops generated by the SCSI mid-layer to SATA devices
5812 *
5813 * Return value:
5814 * none
5815 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* Preserve the GATA status/error registers for libata */
	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
	       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	/* Sense key above RECOVERED_ERROR forces the full error mask */
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	else
		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
5838
5839/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005840 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5841 * @ipr_cmd: ipr command struct
5842 * @qc: ATA queued command
5843 *
5844 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	/* Non-data command: no IOADL needed */
	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	/* Point the IOARCB at this command's ATA IOADL area */
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));

	/* One 64-bit descriptor per scatterlist segment; remember the
	 * final descriptor so it can carry the LAST flag. */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
5884
5885/**
Brian King35a39692006-09-25 12:39:20 -05005886 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5887 * @ipr_cmd: ipr command struct
5888 * @qc: ATA queued command
5889 *
5890 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	/* Non-data command: no IOADL needed */
	if (len == 0)
		return;

	/* Direction selects which IOARCB length/flag fields are used */
	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* One 32-bit descriptor per scatterlist segment; remember the
	 * final descriptor so it can carry the LAST flag. */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
5929
5930/**
5931 * ipr_qc_issue - Issue a SATA qc to a device
5932 * @qc: queued command
5933 *
5934 * Return value:
5935 * 0 if success
5936 **/
5937static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5938{
5939 struct ata_port *ap = qc->ap;
5940 struct ipr_sata_port *sata_port = ap->private_data;
5941 struct ipr_resource_entry *res = sata_port->res;
5942 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5943 struct ipr_cmnd *ipr_cmd;
5944 struct ipr_ioarcb *ioarcb;
5945 struct ipr_ioarcb_ata_regs *regs;
5946
5947 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
Brian King0feeed82007-03-29 12:43:43 -05005948 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05005949
5950 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5951 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05005952
Wayne Boyera32c0552010-02-19 13:23:36 -08005953 if (ioa_cfg->sis64) {
5954 regs = &ipr_cmd->i.ata_ioadl.regs;
5955 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5956 } else
5957 regs = &ioarcb->u.add_data.u.regs;
5958
5959 memset(regs, 0, sizeof(*regs));
5960 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05005961
5962 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5963 ipr_cmd->qc = qc;
5964 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005965 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05005966 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5967 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5968 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01005969 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05005970
Wayne Boyera32c0552010-02-19 13:23:36 -08005971 if (ioa_cfg->sis64)
5972 ipr_build_ata_ioadl64(ipr_cmd, qc);
5973 else
5974 ipr_build_ata_ioadl(ipr_cmd, qc);
5975
Brian King35a39692006-09-25 12:39:20 -05005976 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5977 ipr_copy_sata_tf(regs, &qc->tf);
5978 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005979 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05005980
5981 switch (qc->tf.protocol) {
5982 case ATA_PROT_NODATA:
5983 case ATA_PROT_PIO:
5984 break;
5985
5986 case ATA_PROT_DMA:
5987 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5988 break;
5989
Tejun Heo0dc36882007-12-18 16:34:43 -05005990 case ATAPI_PROT_PIO:
5991 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05005992 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5993 break;
5994
Tejun Heo0dc36882007-12-18 16:34:43 -05005995 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05005996 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5997 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5998 break;
5999
6000 default:
6001 WARN_ON(1);
Brian King0feeed82007-03-29 12:43:43 -05006002 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006003 }
6004
6005 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08006006
6007 ipr_send_command(ipr_cmd);
6008
Brian King35a39692006-09-25 12:39:20 -05006009 return 0;
6010}
6011
6012/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006013 * ipr_qc_fill_rtf - Read result TF
6014 * @qc: ATA queued command
6015 *
6016 * Return value:
6017 * true
6018 **/
6019static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6020{
6021 struct ipr_sata_port *sata_port = qc->ap->private_data;
6022 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6023 struct ata_taskfile *tf = &qc->result_tf;
6024
6025 tf->feature = g->error;
6026 tf->nsect = g->nsect;
6027 tf->lbal = g->lbal;
6028 tf->lbam = g->lbam;
6029 tf->lbah = g->lbah;
6030 tf->device = g->device;
6031 tf->command = g->status;
6032 tf->hob_nsect = g->hob_nsect;
6033 tf->hob_lbal = g->hob_lbal;
6034 tf->hob_lbam = g->hob_lbam;
6035 tf->hob_lbah = g->hob_lbah;
6036 tf->ctl = g->alt_status;
6037
6038 return true;
6039}
6040
Brian King35a39692006-09-25 12:39:20 -05006041static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006042 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006043 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006044 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006045 .qc_prep = ata_noop_qc_prep,
6046 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006047 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006048 .port_start = ata_sas_port_start,
6049 .port_stop = ata_sas_port_stop
6050};
6051
/* Port capabilities advertised to libata: PIO4, MWDMA0-2, UDMA0-6 */
static struct ata_port_info sata_port_info = {
	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask	= 0x10, /* pio4 */
	.mwdma_mask = 0x07,
	.udma_mask	= 0x7f, /* udma0-6 */
	.port_ops	= &ipr_sata_ops
};
6060
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061#ifdef CONFIG_PPC_PSERIES
6062static const u16 ipr_blocked_processors[] = {
6063 PV_NORTHSTAR,
6064 PV_PULSAR,
6065 PV_POWER4,
6066 PV_ICESTAR,
6067 PV_SSTAR,
6068 PV_POWER4p,
6069 PV_630,
6070 PV_630p
6071};
6072
6073/**
6074 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6075 * @ioa_cfg: ioa cfg struct
6076 *
6077 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6078 * certain pSeries hardware. This function determines if the given
6079 * adapter is in one of these confgurations or not.
6080 *
6081 * Return value:
6082 * 1 if adapter is not supported / 0 if adapter is supported
6083 **/
6084static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6085{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086 int i;
6087
Auke Kok44c10132007-06-08 15:46:36 -07006088 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6089 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6090 if (__is_processor(ipr_blocked_processors[i]))
6091 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006092 }
6093 }
6094 return 0;
6095}
6096#else
6097#define ipr_invalid_adapter(ioa_cfg) 0
6098#endif
6099
6100/**
6101 * ipr_ioa_bringdown_done - IOA bring down completion.
6102 * @ipr_cmd: ipr command struct
6103 *
6104 * This function processes the completion of an adapter bring down.
6105 * It wakes any reset sleepers.
6106 *
6107 * Return value:
6108 * IPR_RC_JOB_RETURN
6109 **/
6110static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6111{
6112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6113
6114 ENTER;
6115 ioa_cfg->in_reset_reload = 0;
6116 ioa_cfg->reset_retries = 0;
6117 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6118 wake_up_all(&ioa_cfg->reset_wait_q);
6119
6120 spin_unlock_irq(ioa_cfg->host->host_lock);
6121 scsi_unblock_requests(ioa_cfg->host);
6122 spin_lock_irq(ioa_cfg->host->host_lock);
6123 LEAVE;
6124
6125 return IPR_RC_JOB_RETURN;
6126}
6127
6128/**
6129 * ipr_ioa_reset_done - IOA reset completion.
6130 * @ipr_cmd: ipr command struct
6131 *
6132 * This function processes the completion of an adapter reset.
6133 * It schedules any necessary mid-layer add/removes and
6134 * wakes any reset sleepers.
6135 *
6136 * Return value:
6137 * IPR_RC_JOB_RETURN
6138 **/
6139static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6140{
6141 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6142 struct ipr_resource_entry *res;
6143 struct ipr_hostrcb *hostrcb, *temp;
6144 int i = 0;
6145
6146 ENTER;
6147 ioa_cfg->in_reset_reload = 0;
6148 ioa_cfg->allow_cmds = 1;
6149 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006150 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006151
6152 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6153 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6154 ipr_trace;
6155 break;
6156 }
6157 }
6158 schedule_work(&ioa_cfg->work_q);
6159
6160 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6161 list_del(&hostrcb->queue);
6162 if (i++ < IPR_NUM_LOG_HCAMS)
6163 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6164 else
6165 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6166 }
6167
Brian King6bb04172007-04-26 16:00:08 -05006168 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006169 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6170
6171 ioa_cfg->reset_retries = 0;
6172 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6173 wake_up_all(&ioa_cfg->reset_wait_q);
6174
Mark Nelson30237852008-12-10 12:23:20 +11006175 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006176 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006177 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178
6179 if (!ioa_cfg->allow_cmds)
6180 scsi_block_requests(ioa_cfg->host);
6181
6182 LEAVE;
6183 return IPR_RC_JOB_RETURN;
6184}
6185
6186/**
6187 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6188 * @supported_dev: supported device struct
6189 * @vpids: vendor product id struct
6190 *
6191 * Return value:
6192 * none
6193 **/
6194static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6195 struct ipr_std_inq_vpids *vpids)
6196{
6197 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6198 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6199 supported_dev->num_records = 1;
6200 supported_dev->data_length =
6201 cpu_to_be16(sizeof(struct ipr_supported_device));
6202 supported_dev->reserved = 0;
6203}
6204
6205/**
6206 * ipr_set_supported_devs - Send Set Supported Devices for a device
6207 * @ipr_cmd: ipr command struct
6208 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006209 * This function sends a Set Supported Devices to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006210 *
6211 * Return value:
6212 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6213 **/
6214static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6215{
6216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6217 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006218 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6219 struct ipr_resource_entry *res = ipr_cmd->u.res;
6220
6221 ipr_cmd->job_step = ipr_ioa_reset_done;
6222
6223 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006224 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006225 continue;
6226
6227 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006228 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229
6230 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6231 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6232 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6233
6234 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006235 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6237 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6238
Wayne Boyera32c0552010-02-19 13:23:36 -08006239 ipr_init_ioadl(ipr_cmd,
6240 ioa_cfg->vpd_cbs_dma +
6241 offsetof(struct ipr_misc_cbs, supp_dev),
6242 sizeof(struct ipr_supported_device),
6243 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006244
6245 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6246 IPR_SET_SUP_DEVICE_TIMEOUT);
6247
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006248 if (!ioa_cfg->sis64)
6249 ipr_cmd->job_step = ipr_set_supported_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006250 return IPR_RC_JOB_RETURN;
6251 }
6252
6253 return IPR_RC_JOB_CONTINUE;
6254}
6255
6256/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006257 * ipr_setup_write_cache - Disable write cache if needed
6258 * @ipr_cmd: ipr command struct
6259 *
6260 * This function sets up adapters write cache to desired setting
6261 *
6262 * Return value:
6263 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6264 **/
6265static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
6266{
6267 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6268
6269 ipr_cmd->job_step = ipr_set_supported_devs;
6270 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6271 struct ipr_resource_entry, queue);
6272
6273 if (ioa_cfg->cache_state != CACHE_DISABLED)
6274 return IPR_RC_JOB_CONTINUE;
6275
6276 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6277 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6278 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6279 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
6280
6281 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6282
6283 return IPR_RC_JOB_RETURN;
6284}
6285
6286/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006287 * ipr_get_mode_page - Locate specified mode page
6288 * @mode_pages: mode page buffer
6289 * @page_code: page code to find
6290 * @len: minimum required length for mode page
6291 *
6292 * Return value:
6293 * pointer to mode page / NULL on failure
6294 **/
6295static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6296 u32 page_code, u32 len)
6297{
6298 struct ipr_mode_page_hdr *mode_hdr;
6299 u32 page_length;
6300 u32 length;
6301
6302 if (!mode_pages || (mode_pages->hdr.length == 0))
6303 return NULL;
6304
6305 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6306 mode_hdr = (struct ipr_mode_page_hdr *)
6307 (mode_pages->data + mode_pages->hdr.block_desc_len);
6308
6309 while (length) {
6310 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6311 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6312 return mode_hdr;
6313 break;
6314 } else {
6315 page_length = (sizeof(struct ipr_mode_page_hdr) +
6316 mode_hdr->page_length);
6317 length -= page_length;
6318 mode_hdr = (struct ipr_mode_page_hdr *)
6319 ((unsigned long)mode_hdr + page_length);
6320 }
6321 }
6322 return NULL;
6323}
6324
6325/**
6326 * ipr_check_term_power - Check for term power errors
6327 * @ioa_cfg: ioa config struct
6328 * @mode_pages: IOAFP mode pages buffer
6329 *
6330 * Check the IOAFP's mode page 28 for term power errors
6331 *
6332 * Return value:
6333 * nothing
6334 **/
6335static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6336 struct ipr_mode_pages *mode_pages)
6337{
6338 int i;
6339 int entry_length;
6340 struct ipr_dev_bus_entry *bus;
6341 struct ipr_mode_page28 *mode_page;
6342
6343 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6344 sizeof(struct ipr_mode_page28));
6345
6346 entry_length = mode_page->entry_length;
6347
6348 bus = mode_page->bus;
6349
6350 for (i = 0; i < mode_page->num_entries; i++) {
6351 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6352 dev_err(&ioa_cfg->pdev->dev,
6353 "Term power is absent on scsi bus %d\n",
6354 bus->res_addr.bus);
6355 }
6356
6357 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6358 }
6359}
6360
6361/**
6362 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6363 * @ioa_cfg: ioa config struct
6364 *
6365 * Looks through the config table checking for SES devices. If
6366 * the SES device is in the SES table indicating a maximum SCSI
6367 * bus speed, the speed is limited for the bus.
6368 *
6369 * Return value:
6370 * none
6371 **/
6372static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6373{
6374 u32 max_xfer_rate;
6375 int i;
6376
6377 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6378 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6379 ioa_cfg->bus_attr[i].bus_width);
6380
6381 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6382 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6383 }
6384}
6385
6386/**
6387 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6388 * @ioa_cfg: ioa config struct
6389 * @mode_pages: mode page 28 buffer
6390 *
6391 * Updates mode page 28 based on driver configuration
6392 *
6393 * Return value:
6394 * none
6395 **/
6396static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6397 struct ipr_mode_pages *mode_pages)
6398{
6399 int i, entry_length;
6400 struct ipr_dev_bus_entry *bus;
6401 struct ipr_bus_attributes *bus_attr;
6402 struct ipr_mode_page28 *mode_page;
6403
6404 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6405 sizeof(struct ipr_mode_page28));
6406
6407 entry_length = mode_page->entry_length;
6408
6409 /* Loop for each device bus entry */
6410 for (i = 0, bus = mode_page->bus;
6411 i < mode_page->num_entries;
6412 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6413 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6414 dev_err(&ioa_cfg->pdev->dev,
6415 "Invalid resource address reported: 0x%08X\n",
6416 IPR_GET_PHYS_LOC(bus->res_addr));
6417 continue;
6418 }
6419
6420 bus_attr = &ioa_cfg->bus_attr[i];
6421 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6422 bus->bus_width = bus_attr->bus_width;
6423 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6424 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6425 if (bus_attr->qas_enabled)
6426 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6427 else
6428 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6429 }
6430}
6431
6432/**
6433 * ipr_build_mode_select - Build a mode select command
6434 * @ipr_cmd: ipr command struct
6435 * @res_handle: resource handle to send command to
6436 * @parm: Byte 2 of Mode Sense command
6437 * @dma_addr: DMA buffer address
6438 * @xfer_len: data transfer length
6439 *
6440 * Return value:
6441 * none
6442 **/
6443static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08006444 __be32 res_handle, u8 parm,
6445 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006447 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6448
6449 ioarcb->res_handle = res_handle;
6450 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6451 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6452 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6453 ioarcb->cmd_pkt.cdb[1] = parm;
6454 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6455
Wayne Boyera32c0552010-02-19 13:23:36 -08006456 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006457}
6458
6459/**
6460 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6461 * @ipr_cmd: ipr command struct
6462 *
6463 * This function sets up the SCSI bus attributes and sends
6464 * a Mode Select for Page 28 to activate them.
6465 *
6466 * Return value:
6467 * IPR_RC_JOB_RETURN
6468 **/
6469static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6470{
6471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6472 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6473 int length;
6474
6475 ENTER;
Brian King47338042006-02-08 20:57:42 -06006476 ipr_scsi_bus_speed_limit(ioa_cfg);
6477 ipr_check_term_power(ioa_cfg, mode_pages);
6478 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6479 length = mode_pages->hdr.length + 1;
6480 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481
6482 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6483 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6484 length);
6485
brking@us.ibm.com62275042005-11-01 17:01:14 -06006486 ipr_cmd->job_step = ipr_setup_write_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6488
6489 LEAVE;
6490 return IPR_RC_JOB_RETURN;
6491}
6492
6493/**
6494 * ipr_build_mode_sense - Builds a mode sense command
6495 * @ipr_cmd: ipr command struct
6496 * @res: resource entry struct
6497 * @parm: Byte 2 of mode sense command
6498 * @dma_addr: DMA address of mode sense buffer
6499 * @xfer_len: Size of DMA buffer
6500 *
6501 * Return value:
6502 * none
6503 **/
6504static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6505 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08006506 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6509
6510 ioarcb->res_handle = res_handle;
6511 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6512 ioarcb->cmd_pkt.cdb[2] = parm;
6513 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6514 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6515
Wayne Boyera32c0552010-02-19 13:23:36 -08006516 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006517}
6518
6519/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006520 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6521 * @ipr_cmd: ipr command struct
6522 *
6523 * This function handles the failure of an IOA bringup command.
6524 *
6525 * Return value:
6526 * IPR_RC_JOB_RETURN
6527 **/
6528static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6529{
6530 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6531 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6532
6533 dev_err(&ioa_cfg->pdev->dev,
6534 "0x%02X failed with IOASC: 0x%08X\n",
6535 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6536
6537 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6538 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6539 return IPR_RC_JOB_RETURN;
6540}
6541
6542/**
6543 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6544 * @ipr_cmd: ipr command struct
6545 *
6546 * This function handles the failure of a Mode Sense to the IOAFP.
6547 * Some adapters do not handle all mode pages.
6548 *
6549 * Return value:
6550 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6551 **/
6552static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6553{
6554 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6555
6556 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6557 ipr_cmd->job_step = ipr_setup_write_cache;
6558 return IPR_RC_JOB_CONTINUE;
6559 }
6560
6561 return ipr_reset_cmd_failed(ipr_cmd);
6562}
6563
6564/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6566 * @ipr_cmd: ipr command struct
6567 *
6568 * This function send a Page 28 mode sense to the IOA to
6569 * retrieve SCSI bus attributes.
6570 *
6571 * Return value:
6572 * IPR_RC_JOB_RETURN
6573 **/
6574static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6575{
6576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6577
6578 ENTER;
6579 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6580 0x28, ioa_cfg->vpd_cbs_dma +
6581 offsetof(struct ipr_misc_cbs, mode_pages),
6582 sizeof(struct ipr_mode_pages));
6583
6584 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006585 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006586
6587 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6588
6589 LEAVE;
6590 return IPR_RC_JOB_RETURN;
6591}
6592
6593/**
Brian Kingac09c342007-04-26 16:00:16 -05006594 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6595 * @ipr_cmd: ipr command struct
6596 *
6597 * This function enables dual IOA RAID support if possible.
6598 *
6599 * Return value:
6600 * IPR_RC_JOB_RETURN
6601 **/
6602static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6603{
6604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6605 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6606 struct ipr_mode_page24 *mode_page;
6607 int length;
6608
6609 ENTER;
6610 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6611 sizeof(struct ipr_mode_page24));
6612
6613 if (mode_page)
6614 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6615
6616 length = mode_pages->hdr.length + 1;
6617 mode_pages->hdr.length = 0;
6618
6619 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6620 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6621 length);
6622
6623 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6624 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6625
6626 LEAVE;
6627 return IPR_RC_JOB_RETURN;
6628}
6629
6630/**
6631 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6632 * @ipr_cmd: ipr command struct
6633 *
6634 * This function handles the failure of a Mode Sense to the IOAFP.
6635 * Some adapters do not handle all mode pages.
6636 *
6637 * Return value:
6638 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6639 **/
6640static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6641{
6642 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6643
6644 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6645 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6646 return IPR_RC_JOB_CONTINUE;
6647 }
6648
6649 return ipr_reset_cmd_failed(ipr_cmd);
6650}
6651
6652/**
6653 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6654 * @ipr_cmd: ipr command struct
6655 *
6656 * This function send a mode sense to the IOA to retrieve
6657 * the IOA Advanced Function Control mode page.
6658 *
6659 * Return value:
6660 * IPR_RC_JOB_RETURN
6661 **/
6662static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6663{
6664 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6665
6666 ENTER;
6667 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6668 0x24, ioa_cfg->vpd_cbs_dma +
6669 offsetof(struct ipr_misc_cbs, mode_pages),
6670 sizeof(struct ipr_mode_pages));
6671
6672 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6673 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6674
6675 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6676
6677 LEAVE;
6678 return IPR_RC_JOB_RETURN;
6679}
6680
6681/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006682 * ipr_init_res_table - Initialize the resource table
6683 * @ipr_cmd: ipr command struct
6684 *
6685 * This function looks through the existing resource table, comparing
6686 * it with the config table. This function will take care of old/new
6687 * devices and schedule adding/removing them from the mid-layer
6688 * as appropriate.
6689 *
6690 * Return value:
6691 * IPR_RC_JOB_CONTINUE
6692 **/
6693static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6694{
6695 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6696 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006697 struct ipr_config_table_entry_wrapper cfgtew;
6698 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 LIST_HEAD(old_res);
6700
6701 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006702 if (ioa_cfg->sis64)
6703 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6704 else
6705 flag = ioa_cfg->u.cfg_table->hdr.flags;
6706
6707 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006708 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6709
6710 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6711 list_move_tail(&res->queue, &old_res);
6712
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006713 if (ioa_cfg->sis64)
6714 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6715 else
6716 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6717
6718 for (i = 0; i < entries; i++) {
6719 if (ioa_cfg->sis64)
6720 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6721 else
6722 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723 found = 0;
6724
6725 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006726 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6728 found = 1;
6729 break;
6730 }
6731 }
6732
6733 if (!found) {
6734 if (list_empty(&ioa_cfg->free_res_q)) {
6735 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6736 break;
6737 }
6738
6739 found = 1;
6740 res = list_entry(ioa_cfg->free_res_q.next,
6741 struct ipr_resource_entry, queue);
6742 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006743 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744 res->add_to_ml = 1;
6745 }
6746
6747 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006748 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006749 }
6750
6751 list_for_each_entry_safe(res, temp, &old_res, queue) {
6752 if (res->sdev) {
6753 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006754 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006755 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756 }
6757 }
6758
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006759 list_for_each_entry_safe(res, temp, &old_res, queue) {
6760 ipr_clear_res_target(res);
6761 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6762 }
6763
Brian Kingac09c342007-04-26 16:00:16 -05006764 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6765 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6766 else
6767 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006768
6769 LEAVE;
6770 return IPR_RC_JOB_CONTINUE;
6771}
6772
6773/**
6774 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6775 * @ipr_cmd: ipr command struct
6776 *
6777 * This function sends a Query IOA Configuration command
6778 * to the adapter to retrieve the IOA configuration table.
6779 *
6780 * Return value:
6781 * IPR_RC_JOB_RETURN
6782 **/
6783static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6784{
6785 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6786 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006787 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05006788 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789
6790 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05006791 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6792 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6794 ucode_vpd->major_release, ucode_vpd->card_type,
6795 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6796 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6797 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6798
6799 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006800 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6801 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006802
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006803 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08006804 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805
6806 ipr_cmd->job_step = ipr_init_res_table;
6807
6808 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6809
6810 LEAVE;
6811 return IPR_RC_JOB_RETURN;
6812}
6813
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	INQUIRY CDB byte 1 (bit 0 is the EVPD bit per SCSI;
 *		callers pass 1 when requesting a VPD page, 0 for
 *		standard inquiry data)
 * @page:	VPD page code (CDB byte 2); 0 when @flags is 0
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	response buffer length in bytes (CDB allocation length)
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	/* Completion re-enters the reset job router */
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
6842
6843/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006844 * ipr_inquiry_page_supported - Is the given inquiry page supported
6845 * @page0: inquiry page 0 buffer
6846 * @page: page code.
6847 *
6848 * This function determines if the specified inquiry page is supported.
6849 *
6850 * Return value:
6851 * 1 if page is supported / 0 if not
6852 **/
6853static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6854{
6855 int i;
6856
6857 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6858 if (page0->page[i] == page)
6859 return 1;
6860
6861 return 0;
6862}
6863
6864/**
Brian Kingac09c342007-04-26 16:00:16 -05006865 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6866 * @ipr_cmd: ipr command struct
6867 *
6868 * This function sends a Page 0xD0 inquiry to the adapter
6869 * to retrieve adapter capabilities.
6870 *
6871 * Return value:
6872 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6873 **/
6874static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6875{
6876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6877 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6878 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6879
6880 ENTER;
6881 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6882 memset(cap, 0, sizeof(*cap));
6883
6884 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6885 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6886 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6887 sizeof(struct ipr_inquiry_cap));
6888 return IPR_RC_JOB_RETURN;
6889 }
6890
6891 LEAVE;
6892 return IPR_RC_JOB_CONTINUE;
6893}
6894
6895/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6897 * @ipr_cmd: ipr command struct
6898 *
6899 * This function sends a Page 3 inquiry to the adapter
6900 * to retrieve software VPD information.
6901 *
6902 * Return value:
6903 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6904 **/
6905static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6906{
6907 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006908 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6909
6910 ENTER;
6911
6912 if (!ipr_inquiry_page_supported(page0, 1))
6913 ioa_cfg->cache_state = CACHE_NONE;
6914
Brian Kingac09c342007-04-26 16:00:16 -05006915 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006916
6917 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6918 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6919 sizeof(struct ipr_inquiry_page3));
6920
6921 LEAVE;
6922 return IPR_RC_JOB_RETURN;
6923}
6924
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages. It also parses the
 * adapter type out of the standard inquiry data fetched by
 * the previous step.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away.
	 * The first 4 chars of the product id are the adapter type
	 * encoded as hex digits (e.g. "5702"). */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6956
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard (non-EVPD) inquiry to the adapter;
 * the response fills ioa_vpd in the misc control block area and is
 * parsed by the next step, ipr_ioafp_page0_inquiry.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6980
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function send an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * NOTE: the "indentify" spelling is a historical typo kept for
 * consistency with existing references to this symbol.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	/* CDB bytes 2-5: 32-bit DMA address of the host RRQ, MSB first.
	 * NOTE(review): the cast truncates host_rrq_dma to 32 bits --
	 * presumably the queue is allocated in 32-bit addressable memory
	 * on these adapters; confirm against the allocation site. */
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	/* CDB bytes 7-8: queue size in bytes (one u32 slot per command) */
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7023
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Context: timer callback; takes the host lock itself.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Only advance the job if this command still owns the reset;
	 * otherwise a nested reset superseded us and fail_all_ops
	 * will reclaim the command block. */
	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
7051
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value (in jiffies)
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block. The command is parked on the pending queue and
 * its done handler re-enters the reset job router when the timer
 * fires (via ipr_reset_timer_done).
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	/* Queue first so fail_all_ops can find the command if the
	 * reset is superseded before the timer fires */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
7077
7078/**
7079 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7080 * @ioa_cfg: ioa cfg struct
7081 *
7082 * Return value:
7083 * nothing
7084 **/
7085static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7086{
7087 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7088
7089 /* Initialize Host RRQ pointers */
7090 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7091 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7092 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7093 ioa_cfg->toggle_bit = 1;
7094
7095 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007096 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007097}
7098
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN on the normal (timed) path,
 * 	IPR_RC_JOB_CONTINUE if the IOA is already operational
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* If the adapter already transitioned to operational, skip the
	 * doorbell/timer path: just unmask error + HRRQ interrupts and
	 * continue straight to the next job step. */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		/* Read back to flush the posted write before continuing */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* Arm the operational timeout; if the adapter never comes up,
	 * ipr_oper_timeout fires instead of the normal completion path */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7146
7147/**
7148 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7149 * @ipr_cmd: ipr command struct
7150 *
7151 * This function is invoked when an adapter dump has run out
7152 * of processing time.
7153 *
7154 * Return value:
7155 * IPR_RC_JOB_CONTINUE
7156 **/
7157static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7158{
7159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7160
7161 if (ioa_cfg->sdt_state == GET_DUMP)
7162 ioa_cfg->sdt_state = ABORT_DUMP;
7163
7164 ipr_cmd->job_step = ipr_reset_alert;
7165
7166 return IPR_RC_JOB_CONTINUE;
7167}
7168
7169/**
7170 * ipr_unit_check_no_data - Log a unit check/no data error log
7171 * @ioa_cfg: ioa config struct
7172 *
7173 * Logs an error indicating the adapter unit checked, but for some
7174 * reason, we were unable to fetch the unit check buffer.
7175 *
7176 * Return value:
7177 * nothing
7178 **/
7179static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7180{
7181 ioa_cfg->errors_logged++;
7182 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7183}
7184
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register. The buffer is decoded as a host RCB
 * and fed to the normal error-log handler; any failure along the way
 * degrades to logging a "unit check with no data" error instead.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* The mailbox must point at a format-2 SDT or there is nothing
	 * we know how to fetch */
	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Pull the SDT header itself through the mailbox, one __be32
	 * at a time */
	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a host RCB from the free list to hold the decoded data.
	 * NOTE(review): this assumes the free list is never empty here;
	 * confirm against how many host RCBs are pre-allocated. */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		/* If the adapter demands another reset while a dump is
		 * pending, hold the dump until that reset completes */
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	/* Return the RCB to the free list in every case */
	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
7245
7246/**
7247 * ipr_reset_restore_cfg_space - Restore PCI config space.
7248 * @ipr_cmd: ipr command struct
7249 *
7250 * Description: This function restores the saved PCI config space of
7251 * the adapter, fails all outstanding ops back to the callers, and
7252 * fetches the dump/unit check if applicable to this reset.
7253 *
7254 * Return value:
7255 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7256 **/
7257static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7258{
7259 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7260 int rc;
7261
7262 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02007263 ioa_cfg->pdev->state_saved = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007264 rc = pci_restore_state(ioa_cfg->pdev);
7265
7266 if (rc != PCIBIOS_SUCCESSFUL) {
7267 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7268 return IPR_RC_JOB_CONTINUE;
7269 }
7270
7271 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7272 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7273 return IPR_RC_JOB_CONTINUE;
7274 }
7275
7276 ipr_fail_all_ops(ioa_cfg);
7277
7278 if (ioa_cfg->ioa_unit_checked) {
7279 ioa_cfg->ioa_unit_checked = 0;
7280 ipr_get_unit_check_buffer(ioa_cfg);
7281 ipr_cmd->job_step = ipr_reset_alert;
7282 ipr_reset_start_timer(ipr_cmd, 0);
7283 return IPR_RC_JOB_RETURN;
7284 }
7285
7286 if (ioa_cfg->in_ioa_bringdown) {
7287 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7288 } else {
7289 ipr_cmd->job_step = ipr_reset_enable_ioa;
7290
7291 if (GET_DUMP == ioa_cfg->sdt_state) {
7292 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7293 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7294 schedule_work(&ioa_cfg->work_q);
7295 return IPR_RC_JOB_RETURN;
7296 }
7297 }
7298
7299 ENTER;
7300 return IPR_RC_JOB_CONTINUE;
7301}
7302
7303/**
Brian Kinge619e1a2007-01-23 11:25:37 -06007304 * ipr_reset_bist_done - BIST has completed on the adapter.
7305 * @ipr_cmd: ipr command struct
7306 *
7307 * Description: Unblock config space and resume the reset process.
7308 *
7309 * Return value:
7310 * IPR_RC_JOB_CONTINUE
7311 **/
7312static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7313{
7314 ENTER;
7315 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7316 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7317 LEAVE;
7318 return IPR_RC_JOB_CONTINUE;
7319}
7320
7321/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007322 * ipr_reset_start_bist - Run BIST on the adapter.
7323 * @ipr_cmd: ipr command struct
7324 *
7325 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7326 *
7327 * Return value:
7328 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7329 **/
7330static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7331{
7332 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7333 int rc;
7334
7335 ENTER;
Brian Kingb30197d2005-09-27 01:21:56 -07007336 pci_block_user_cfg_access(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007337 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7338
7339 if (rc != PCIBIOS_SUCCESSFUL) {
Brian Kinga9aedb02007-03-29 12:43:23 -05007340 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7342 rc = IPR_RC_JOB_CONTINUE;
7343 } else {
Brian Kinge619e1a2007-01-23 11:25:37 -06007344 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007345 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7346 rc = IPR_RC_JOB_RETURN;
7347 }
7348
7349 LEAVE;
7350 return rc;
7351}
7352
7353/**
Brian King463fc692007-05-07 17:09:05 -05007354 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7355 * @ipr_cmd: ipr command struct
7356 *
7357 * Description: This clears PCI reset to the adapter and delays two seconds.
7358 *
7359 * Return value:
7360 * IPR_RC_JOB_RETURN
7361 **/
7362static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7363{
7364 ENTER;
7365 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7366 ipr_cmd->job_step = ipr_reset_bist_done;
7367 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7368 LEAVE;
7369 return IPR_RC_JOB_RETURN;
7370}
7371
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI warm reset to the adapter and holds it
 * for IPR_PCI_RESET_TIMEOUT before deasserting in
 * ipr_reset_slot_reset_done. User config access stays blocked until
 * ipr_reset_bist_done unblocks it.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_block_user_cfg_access(pdev);
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7394
7395/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007396 * ipr_reset_allowed - Query whether or not IOA can be reset
7397 * @ioa_cfg: ioa config struct
7398 *
7399 * Return value:
7400 * 0 if reset not allowed / non-zero if reset is allowed
7401 **/
7402static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7403{
7404 volatile u32 temp_reg;
7405
7406 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7407 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7408}
7409
7410/**
7411 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7412 * @ipr_cmd: ipr command struct
7413 *
7414 * Description: This function waits for adapter permission to run BIST,
7415 * then runs BIST. If the adapter does not give permission after a
7416 * reasonable time, we will reset the adapter anyway. The impact of
7417 * resetting the adapter without warning the adapter is the risk of
7418 * losing the persistent error log on the adapter. If the adapter is
7419 * reset while it is writing to the flash on the adapter, the flash
7420 * segment will have bad ECC and be zeroed.
7421 *
7422 * Return value:
7423 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7424 **/
7425static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7426{
7427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7428 int rc = IPR_RC_JOB_RETURN;
7429
7430 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7431 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7432 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7433 } else {
Brian King463fc692007-05-07 17:09:05 -05007434 ipr_cmd->job_step = ioa_cfg->reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435 rc = IPR_RC_JOB_CONTINUE;
7436 }
7437
7438 return rc;
7439}
7440
/**
 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* MMIO is usable: quiesce interrupts and raise the reset
		 * alert doorbell, then poll for permission to reset */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		/* No memory space -- cannot alert; go straight to the
		 * configured reset method */
		ipr_cmd->job_step = ioa_cfg->reset;
	}

	/* Always defer via timer so the reset never runs from ipr_isr */
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7476
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer
 * (mapped in the sysfs update-firmware path) and advances the reset
 * job to alerting the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
7497
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if it there is microcode
 * to download to the adapter. If there is, a download is performed
 * using a SCSI WRITE BUFFER command; otherwise the job proceeds
 * directly to ipr_reset_alert.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	/* No pending microcode image -- nothing to do */
	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	/* CDB bytes 6-8: 24-bit parameter list length, MSB first */
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	/* Build the IOADL in whichever format this adapter generation uses */
	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7539
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job. A shutdown is skipped entirely when the type
 * is IPR_SHUTDOWN_NONE or the adapter is already marked dead.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		/* Timeout scales with how much work the shutdown type
		 * implies; dual-IOA RAID gets its own abbreviated value */
		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
7584
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 * It repeatedly invokes the current job_step while steps return
 * IPR_RC_JOB_CONTINUE, and exits when a step returns IPR_RC_JOB_RETURN
 * (meaning the step has queued asynchronous work that will re-enter
 * this router on completion).
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		/* A failed step may redirect or terminate the job */
		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		/* Reinit must precede job_step: each step rebuilds the
		 * command from a clean state */
		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while(rc == IPR_RC_JOB_CONTINUE);
}
7622
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:	ioa config struct
 * @job_step:	first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Context: caller holds the host lock (the job steps assume it).
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	/* Stop new commands from entering the adapter while we reset */
	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	/* Kick the job router; it runs steps until one goes asynchronous */
	ipr_reset_ioa_job(ipr_cmd);
}
7654
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:	ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q. After too many retries
 * (IPR_NUM_RESET_RELOAD_RETRIES) the adapter is taken offline for good.
 *
 * Context: caller holds the host lock; it is briefly dropped on the
 * offline path to call scsi_unblock_requests().
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	/* A new reset supersedes any dump the previous one was gathering */
	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			/* Already shutting down: finish the bringdown here
			 * and wake anyone waiting on the reset */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			/* scsi_unblock_requests must not be called with the
			 * host lock held */
			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			/* Convert this reset into a bringdown of the dead IOA */
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
7702
7703/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007704 * ipr_reset_freeze - Hold off all I/O activity
7705 * @ipr_cmd: ipr command struct
7706 *
7707 * Description: If the PCI slot is frozen, hold off all I/O
7708 * activity; then, as soon as the slot is available again,
7709 * initiate an adapter reset.
7710 */
7711static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7712{
7713 /* Disallow new interrupts, avoid loop */
7714 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7715 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7716 ipr_cmd->done = ipr_reset_ioa_job;
7717 return IPR_RC_JOB_RETURN;
7718}
7719
7720/**
7721 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7722 * @pdev: PCI device struct
7723 *
7724 * Description: This routine is called to tell us that the PCI bus
7725 * is down. Can't do anything here, except put the device driver
7726 * into a holding pattern, waiting for the PCI bus to come back.
7727 */
7728static void ipr_pci_frozen(struct pci_dev *pdev)
7729{
7730 unsigned long flags = 0;
7731 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7732
7733 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7734 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7736}
7737
7738/**
7739 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7740 * @pdev: PCI device struct
7741 *
7742 * Description: This routine is called by the pci error recovery
7743 * code after the PCI slot has been reset, just before we
7744 * should resume normal operations.
7745 */
7746static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7747{
7748 unsigned long flags = 0;
7749 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7750
7751 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05007752 if (ioa_cfg->needs_warm_reset)
7753 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7754 else
7755 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7756 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007757 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7758 return PCI_ERS_RESULT_RECOVERED;
7759}
7760
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed. The adapter is taken offline immediately.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Any dump still being waited for can never complete now */
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	/* Pre-load the retry counter so ipr_initiate_ioa_reset() takes its
	 * "retries exhausted" path and marks the adapter dead at once */
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ioa_cfg->allow_cmds = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
7782
7783/**
7784 * ipr_pci_error_detected - Called when a PCI error is detected.
7785 * @pdev: PCI device struct
7786 * @state: PCI channel state
7787 *
7788 * Description: Called when a PCI error is detected.
7789 *
7790 * Return value:
7791 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7792 */
7793static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7794 pci_channel_state_t state)
7795{
7796 switch (state) {
7797 case pci_channel_io_frozen:
7798 ipr_pci_frozen(pdev);
7799 return PCI_ERS_RESULT_NEED_RESET;
7800 case pci_channel_io_perm_failure:
7801 ipr_pci_perm_failure(pdev);
7802 return PCI_ERS_RESULT_DISCONNECT;
7803 break;
7804 default:
7805 break;
7806 }
7807 return PCI_ERS_RESULT_NEED_RESET;
7808}
7809
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	/* Probe may have found the card in an unknown state, in which case
	 * a full reset is required; otherwise a plain enable suffices */
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	/* Drop the lock and sleep until the reset job completes */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		/* In test mode an unsupported adapter is tolerated;
		 * the error is still logged either way */
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
7855
7856/**
7857 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7858 * @ioa_cfg: ioa config struct
7859 *
7860 * Return value:
7861 * none
7862 **/
7863static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7864{
7865 int i;
7866
7867 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7868 if (ioa_cfg->ipr_cmnd_list[i])
7869 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7870 ioa_cfg->ipr_cmnd_list[i],
7871 ioa_cfg->ipr_cmnd_list_dma[i]);
7872
7873 ioa_cfg->ipr_cmnd_list[i] = NULL;
7874 }
7875
7876 if (ioa_cfg->ipr_cmd_pool)
7877 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7878
7879 ioa_cfg->ipr_cmd_pool = NULL;
7880}
7881
7882/**
7883 * ipr_free_mem - Frees memory allocated for an adapter
7884 * @ioa_cfg: ioa cfg struct
7885 *
7886 * Return value:
7887 * nothing
7888 **/
7889static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7890{
7891 int i;
7892
7893 kfree(ioa_cfg->res_entries);
7894 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7895 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7896 ipr_free_cmd_blks(ioa_cfg);
7897 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7898 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007899 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7900 ioa_cfg->u.cfg_table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007901 ioa_cfg->cfg_table_dma);
7902
7903 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7904 pci_free_consistent(ioa_cfg->pdev,
7905 sizeof(struct ipr_hostrcb),
7906 ioa_cfg->hostrcb[i],
7907 ioa_cfg->hostrcb_dma[i]);
7908 }
7909
7910 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007911 kfree(ioa_cfg->trace);
7912}
7913
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	/* Teardown mirrors the acquisition order in ipr_probe_ioa(),
	 * in reverse: IRQ, MSI, MMIO mapping, PCI regions, memory,
	 * SCSI host reference, then the PCI device itself */
	free_irq(pdev->irq, ioa_cfg);
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
7938
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Description: Creates a DMA pool of command blocks, allocates
 * IPR_NUM_CMD_BLKS of them, wires each block's IOARCB up with the
 * DMA addresses of its IOADL/IOASA areas (64 bit form for SIS64,
 * 32 bit form for legacy SIS32), and places them on the free queue.
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	/* Pool entries are 16-byte aligned */
	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
						 sizeof(struct ipr_cmnd), 16, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			/* Partial allocation: release what we got so far */
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		/* Record where this command block lives in DMA space, in the
		 * address format the adapter generation understands */
		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
8002
8003/**
8004 * ipr_alloc_mem - Allocate memory for an adapter
8005 * @ioa_cfg: ioa config struct
8006 *
8007 * Return value:
8008 * 0 on success / non-zero for error
8009 **/
8010static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8011{
8012 struct pci_dev *pdev = ioa_cfg->pdev;
8013 int i, rc = -ENOMEM;
8014
8015 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008016 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008017 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018
8019 if (!ioa_cfg->res_entries)
8020 goto out;
8021
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008022 if (ioa_cfg->sis64) {
8023 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8024 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8025 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8026 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8027 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8028 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8029 }
8030
8031 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008032 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008033 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008035
8036 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8037 sizeof(struct ipr_misc_cbs),
8038 &ioa_cfg->vpd_cbs_dma);
8039
8040 if (!ioa_cfg->vpd_cbs)
8041 goto out_free_res_entries;
8042
8043 if (ipr_alloc_cmd_blks(ioa_cfg))
8044 goto out_free_vpd_cbs;
8045
8046 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8047 sizeof(u32) * IPR_NUM_CMD_BLKS,
8048 &ioa_cfg->host_rrq_dma);
8049
8050 if (!ioa_cfg->host_rrq)
8051 goto out_ipr_free_cmd_blocks;
8052
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008053 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8054 ioa_cfg->cfg_table_size,
8055 &ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008057 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058 goto out_free_host_rrq;
8059
8060 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8061 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8062 sizeof(struct ipr_hostrcb),
8063 &ioa_cfg->hostrcb_dma[i]);
8064
8065 if (!ioa_cfg->hostrcb[i])
8066 goto out_free_hostrcb_dma;
8067
8068 ioa_cfg->hostrcb[i]->hostrcb_dma =
8069 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06008070 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008071 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8072 }
8073
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008074 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008075 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8076
8077 if (!ioa_cfg->trace)
8078 goto out_free_hostrcb_dma;
8079
Linus Torvalds1da177e2005-04-16 15:20:36 -07008080 rc = 0;
8081out:
8082 LEAVE;
8083 return rc;
8084
8085out_free_hostrcb_dma:
8086 while (i-- > 0) {
8087 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8088 ioa_cfg->hostrcb[i],
8089 ioa_cfg->hostrcb_dma[i]);
8090 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008091 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8092 ioa_cfg->u.cfg_table,
8093 ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094out_free_host_rrq:
8095 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8096 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8097out_ipr_free_cmd_blocks:
8098 ipr_free_cmd_blks(ioa_cfg);
8099out_free_vpd_cbs:
8100 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8101 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8102out_free_res_entries:
8103 kfree(ioa_cfg->res_entries);
8104 goto out;
8105}
8106
8107/**
8108 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8109 * @ioa_cfg: ioa config struct
8110 *
8111 * Return value:
8112 * none
8113 **/
8114static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8115{
8116 int i;
8117
8118 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8119 ioa_cfg->bus_attr[i].bus = i;
8120 ioa_cfg->bus_attr[i].qas_enabled = 0;
8121 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8122 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8123 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8124 else
8125 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8126 }
8127}
8128
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	/* Eye-catcher labels make the internal areas recognizable in dumps */
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	/* Write cache state tracks the ipr_enable_cache module parameter */
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	/* SIS64 adapters have larger target/LUN limits; clamp the
	 * ipr_max_devs module parameter to each generation's maximum */
	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	/* Resolve the chip-specific register offsets against the mapped BAR */
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
8206
8207/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008208 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07008209 * @dev_id: PCI device id struct
8210 *
8211 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008212 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008213 **/
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008214static const struct ipr_chip_t * __devinit
8215ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008216{
8217 int i;
8218
Linus Torvalds1da177e2005-04-16 15:20:36 -07008219 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8220 if (ipr_chip[i].vendor == dev_id->vendor &&
8221 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008222 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008223 return NULL;
8224}
8225
8226/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07008227 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8228 * @pdev: PCI device struct
8229 *
8230 * Description: Simply set the msi_received flag to 1 indicating that
8231 * Message Signaled Interrupts are supported.
8232 *
8233 * Return value:
8234 * 0 on success / non-zero on failure
8235 **/
8236static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8237{
8238 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8239 unsigned long lock_flags = 0;
8240 irqreturn_t rc = IRQ_HANDLED;
8241
8242 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8243
8244 ioa_cfg->msi_received = 1;
8245 wake_up(&ioa_cfg->msi_wait_q);
8246
8247 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8248 return rc;
8249}
8250
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() can not always be
 * trusted.  This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the tests fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	/* Mask everything, then unmask only the debug-acknowledge
	 * interrupt used as the test vehicle */
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);	/* read back after write */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	/* Fire the test interrupt, then wait up to one second for
	 * ipr_test_intr() to set msi_received */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
8308
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Description: Enables the PCI device, maps its registers, sets up DMA
 * masks and MSI, allocates all per-adapter memory, registers the
 * interrupt handler and adds the adapter to the global driver list.
 * The adapter itself is brought up later in ipr_probe_ioa_part2().
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	/* The ioa_cfg lives in the Scsi_Host's hostdata area */
	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;

	/* Module parameter overrides the per-device transition timeout */
	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	/* SIS64 adapters can do 64 bit DMA; fall back to a 32 bit
	 * mask if the 64 bit one cannot be set */
	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Enable MSI style interrupts if they are supported. */
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (rc)
			goto out_msi_disable;
		else
			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
	} else if (ipr_debug)
		dev_info(&pdev->dev, "Cannot enable MSI.\n");

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	/* Config table header and entries are larger in the SIS64 format */
	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	/* With MSI the vector is exclusive; legacy interrupts are shared */
	rc = request_irq(pdev->irq, ipr_isr,
			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
			 IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

	LEAVE;
out:
	return rc;

	/* Error unwind: each label releases everything acquired after
	 * the next one down the ladder */
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_msi_disable:
	pci_disable_msi(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
8517
8518/**
8519 * ipr_scan_vsets - Scans for VSET devices
8520 * @ioa_cfg: ioa config struct
8521 *
8522 * Description: Since the VSET resources do not follow SAM in that we can have
8523 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8524 *
8525 * Return value:
8526 * none
8527 **/
8528static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8529{
8530 int target, lun;
8531
8532 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8533 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8534 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8535}
8536
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	/* A dump still being waited for can never complete once the
	 * adapter is shut down */
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	/* Flag this reset as a bringdown (see ipr_initiate_ioa_reset()) */
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
8562
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	/* Wait out any reset already in progress before starting the
	 * bringdown; the lock is dropped while sleeping */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	/* Make sure the worker thread has drained before teardown */
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	/* Remove the adapter from the global driver list */
	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
8604
8605/**
8606 * ipr_remove - IOA hot plug remove entry point
8607 * @pdev: pci device struct
8608 *
8609 * Adapter hot plug remove entry point.
8610 *
8611 * Return value:
8612 * none
8613 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	/* Tear down the sysfs trace and dump attributes created at probe
	 * time, then unregister the SCSI host, before the common remove
	 * path frees the adapter's resources. */
	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
8630
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
8637static int __devinit ipr_probe(struct pci_dev *pdev,
8638 const struct pci_device_id *dev_id)
8639{
8640 struct ipr_ioa_cfg *ioa_cfg;
8641 int rc;
8642
8643 rc = ipr_probe_ioa(pdev, dev_id);
8644
8645 if (rc)
8646 return rc;
8647
8648 ioa_cfg = pci_get_drvdata(pdev);
8649 rc = ipr_probe_ioa_part2(ioa_cfg);
8650
8651 if (rc) {
8652 __ipr_remove(pdev);
8653 return rc;
8654 }
8655
8656 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8657
8658 if (rc) {
8659 __ipr_remove(pdev);
8660 return rc;
8661 }
8662
Tony Jonesee959b02008-02-22 00:13:36 +01008663 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008664 &ipr_trace_attr);
8665
8666 if (rc) {
8667 scsi_remove_host(ioa_cfg->host);
8668 __ipr_remove(pdev);
8669 return rc;
8670 }
8671
Tony Jonesee959b02008-02-22 00:13:36 +01008672 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008673 &ipr_dump_attr);
8674
8675 if (rc) {
Tony Jonesee959b02008-02-22 00:13:36 +01008676 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008677 &ipr_trace_attr);
8678 scsi_remove_host(ioa_cfg->host);
8679 __ipr_remove(pdev);
8680 return rc;
8681 }
8682
8683 scsi_scan_host(ioa_cfg->host);
8684 ipr_scan_vsets(ioa_cfg);
8685 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8686 ioa_cfg->allow_ml_add_del = 1;
brking@us.ibm.com11cd8f12005-11-01 17:00:11 -06008687 ioa_cfg->host->max_channel = IPR_VSET_BUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008688 schedule_work(&ioa_cfg->work_q);
8689 return 0;
8690}
8691
8692/**
8693 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07008694 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07008695 *
8696 * This function is invoked upon system shutdown/reboot. It will issue
8697 * an adapter shutdown to the adapter to flush the write cache.
8698 *
8699 * Return value:
8700 * none
8701 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait out any reset/reload already in progress, re-checking
	 * under the host lock after each wakeup. */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Issue a normal shutdown to flush the write cache, then wait
	 * (lock dropped) for the bringdown to complete. */
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
8718
/*
 * PCI IDs claimed by this driver: vendor/device plus IBM subsystem IDs.
 * The final field is driver_data, carrying per-adapter flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT},
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8780
/*
 * PCI error recovery callbacks.  Only error detection and slot reset
 * are provided here; no .resume callback is set in this table.
 */
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
8785
/*
 * PCI driver registration: device id table, hot plug probe/remove entry
 * points, shutdown hook, and PCI error recovery callbacks.
 */
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
8794
8795/**
8796 * ipr_init - Module entry point
8797 *
8798 * Return value:
8799 * 0 on success / negative value on failure
8800 **/
8801static int __init ipr_init(void)
8802{
8803 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8804 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8805
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -07008806 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008807}
8808
8809/**
8810 * ipr_exit - Module unload
8811 *
8812 * Module unload entry point.
8813 *
8814 * Return value:
8815 * none
8816 **/
static void __exit ipr_exit(void)
{
	/* Unregistering triggers ipr_remove() for each bound adapter. */
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);