blob: d9f208343a24cb5124a5ba304603630ca5727033 [file] [log] [blame]
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
Brian King35a39692006-09-25 12:39:20 -050073#include <linux/libata.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070074#include <asm/io.h>
75#include <asm/irq.h>
76#include <asm/processor.h>
77#include <scsi/scsi.h>
78#include <scsi/scsi_host.h>
79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070082#include "ipr.h"
83
84/*
85 * Global Data
86 */
87static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
88static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0;
Brian King5469cb52007-03-29 12:42:40 -050092static unsigned int ipr_transop_timeout = 0;
brking@us.ibm.com62275042005-11-01 17:01:14 -060093static unsigned int ipr_enable_cache = 1;
brking@us.ibm.comd3c74872005-11-01 17:01:34 -060094static unsigned int ipr_debug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070095static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
Brian King60e74862006-11-21 10:28:10 -060099 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 .mailbox = 0x0042C,
101 .cache_line_size = 0x20,
102 {
103 .set_interrupt_mask_reg = 0x0022C,
104 .clr_interrupt_mask_reg = 0x00230,
105 .sense_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_reg = 0x00228,
107 .sense_interrupt_reg = 0x00224,
108 .ioarrin_reg = 0x00404,
109 .sense_uproc_interrupt_reg = 0x00214,
110 .set_uproc_interrupt_reg = 0x00214,
111 .clr_uproc_interrupt_reg = 0x00218
112 }
113 },
114 { /* Snipe and Scamp */
115 .mailbox = 0x0052C,
116 .cache_line_size = 0x20,
117 {
118 .set_interrupt_mask_reg = 0x00288,
119 .clr_interrupt_mask_reg = 0x0028C,
120 .sense_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_reg = 0x00284,
122 .sense_interrupt_reg = 0x00280,
123 .ioarrin_reg = 0x00504,
124 .sense_uproc_interrupt_reg = 0x00290,
125 .set_uproc_interrupt_reg = 0x00290,
126 .clr_uproc_interrupt_reg = 0x00294
127 }
128 },
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
brking@us.ibm.com86f51432005-11-01 17:02:42 -0600134 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
Brian King60e74862006-11-21 10:28:10 -0600136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
138 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
139};
140
141static int ipr_max_bus_speeds [] = {
142 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
143};
144
145MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
146MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
147module_param_named(max_speed, ipr_max_speed, uint, 0);
148MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
149module_param_named(log_level, ipr_log_level, uint, 0);
150MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
151module_param_named(testmode, ipr_testmode, int, 0);
152MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
153module_param_named(fastfail, ipr_fastfail, int, 0);
154MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
155module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
156MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
brking@us.ibm.com62275042005-11-01 17:01:14 -0600157module_param_named(enable_cache, ipr_enable_cache, int, 0);
158MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
brking@us.ibm.comd3c74872005-11-01 17:01:34 -0600159module_param_named(debug, ipr_debug, int, 0);
160MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161MODULE_LICENSE("GPL");
162MODULE_VERSION(IPR_DRIVER_VERSION);
163
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164/* A constant array of IOASCs/URCs/Error Messages */
165static const
166struct ipr_error_table_t ipr_error_table[] = {
Brian King933916f2007-03-29 12:43:30 -0500167 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 "8155: An unknown error was received"},
169 {0x00330000, 0, 0,
170 "Soft underlength error"},
171 {0x005A0000, 0, 0,
172 "Command to be cancelled not found"},
173 {0x00808000, 0, 0,
174 "Qualified success"},
Brian King933916f2007-03-29 12:43:30 -0500175 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 "FFFE: Soft device bus error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500177 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500178 "4101: Soft device bus fabric error"},
Brian King933916f2007-03-29 12:43:30 -0500179 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 "FFF9: Device sector reassign successful"},
Brian King933916f2007-03-29 12:43:30 -0500181 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 "FFF7: Media error recovered by device rewrite procedures"},
Brian King933916f2007-03-29 12:43:30 -0500183 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184 "7001: IOA sector reassignment successful"},
Brian King933916f2007-03-29 12:43:30 -0500185 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 "FFF9: Soft media error. Sector reassignment recommended"},
Brian King933916f2007-03-29 12:43:30 -0500187 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188 "FFF7: Media error recovered by IOA rewrite procedures"},
Brian King933916f2007-03-29 12:43:30 -0500189 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 "FF3D: Soft PCI bus error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500191 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 "FFF6: Device hardware error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500193 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194 "FFF6: Device hardware error recovered by the device"},
Brian King933916f2007-03-29 12:43:30 -0500195 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 "FF3D: Soft IOA error recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500197 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 "FFFA: Undefined device response recovered by the IOA"},
Brian King933916f2007-03-29 12:43:30 -0500199 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 "FFF6: Device bus error, message or command phase"},
Brian King933916f2007-03-29 12:43:30 -0500201 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King35a39692006-09-25 12:39:20 -0500202 "FFFE: Task Management Function failed"},
Brian King933916f2007-03-29 12:43:30 -0500203 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 "FFF6: Failure prediction threshold exceeded"},
Brian King933916f2007-03-29 12:43:30 -0500205 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 "8009: Impending cache battery pack failure"},
207 {0x02040400, 0, 0,
208 "34FF: Disk device format in progress"},
Brian King65f56472007-04-26 16:00:12 -0500209 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
210 "9070: IOA requested reset"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 {0x023F0000, 0, 0,
212 "Synchronization required"},
213 {0x024E0000, 0, 0,
214 "No ready, IOA shutdown"},
215 {0x025A0000, 0, 0,
216 "Not ready, IOA has been shutdown"},
Brian King933916f2007-03-29 12:43:30 -0500217 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 "3020: Storage subsystem configuration error"},
219 {0x03110B00, 0, 0,
220 "FFF5: Medium error, data unreadable, recommend reassign"},
221 {0x03110C00, 0, 0,
222 "7000: Medium error, data unreadable, do not reassign"},
Brian King933916f2007-03-29 12:43:30 -0500223 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 "FFF3: Disk media format bad"},
Brian King933916f2007-03-29 12:43:30 -0500225 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226 "3002: Addressed device failed to respond to selection"},
Brian King933916f2007-03-29 12:43:30 -0500227 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 "3100: Device bus error"},
Brian King933916f2007-03-29 12:43:30 -0500229 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 "3109: IOA timed out a device command"},
231 {0x04088000, 0, 0,
232 "3120: SCSI bus is not operational"},
Brian King933916f2007-03-29 12:43:30 -0500233 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500234 "4100: Hard device bus fabric error"},
Brian King933916f2007-03-29 12:43:30 -0500235 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236 "9000: IOA reserved area data check"},
Brian King933916f2007-03-29 12:43:30 -0500237 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238 "9001: IOA reserved area invalid data pattern"},
Brian King933916f2007-03-29 12:43:30 -0500239 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 "9002: IOA reserved area LRC error"},
Brian King933916f2007-03-29 12:43:30 -0500241 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 "102E: Out of alternate sectors for disk storage"},
Brian King933916f2007-03-29 12:43:30 -0500243 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 "FFF4: Data transfer underlength error"},
Brian King933916f2007-03-29 12:43:30 -0500245 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246 "FFF4: Data transfer overlength error"},
Brian King933916f2007-03-29 12:43:30 -0500247 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248 "3400: Logical unit failure"},
Brian King933916f2007-03-29 12:43:30 -0500249 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250 "FFF4: Device microcode is corrupt"},
Brian King933916f2007-03-29 12:43:30 -0500251 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 "8150: PCI bus error"},
253 {0x04430000, 1, 0,
254 "Unsupported device bus message received"},
Brian King933916f2007-03-29 12:43:30 -0500255 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 "FFF4: Disk device problem"},
Brian King933916f2007-03-29 12:43:30 -0500257 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 "8150: Permanent IOA failure"},
Brian King933916f2007-03-29 12:43:30 -0500259 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 "3010: Disk device returned wrong response to IOA"},
Brian King933916f2007-03-29 12:43:30 -0500261 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262 "8151: IOA microcode error"},
263 {0x04448500, 0, 0,
264 "Device bus status error"},
Brian King933916f2007-03-29 12:43:30 -0500265 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266 "8157: IOA error requiring IOA reset to recover"},
Brian King35a39692006-09-25 12:39:20 -0500267 {0x04448700, 0, 0,
268 "ATA device status error"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 {0x04490000, 0, 0,
270 "Message reject received from the device"},
Brian King933916f2007-03-29 12:43:30 -0500271 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 "8008: A permanent cache battery pack failure occurred"},
Brian King933916f2007-03-29 12:43:30 -0500273 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 "9090: Disk unit has been modified after the last known status"},
Brian King933916f2007-03-29 12:43:30 -0500275 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 "9081: IOA detected device error"},
Brian King933916f2007-03-29 12:43:30 -0500277 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 "9082: IOA detected device error"},
Brian King933916f2007-03-29 12:43:30 -0500279 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 "3110: Device bus error, message or command phase"},
Brian King933916f2007-03-29 12:43:30 -0500281 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
Brian King35a39692006-09-25 12:39:20 -0500282 "3110: SAS Command / Task Management Function failed"},
Brian King933916f2007-03-29 12:43:30 -0500283 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 "9091: Incorrect hardware configuration change has been detected"},
Brian King933916f2007-03-29 12:43:30 -0500285 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600286 "9073: Invalid multi-adapter configuration"},
Brian King933916f2007-03-29 12:43:30 -0500287 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500288 "4010: Incorrect connection between cascaded expanders"},
Brian King933916f2007-03-29 12:43:30 -0500289 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500290 "4020: Connections exceed IOA design limits"},
Brian King933916f2007-03-29 12:43:30 -0500291 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500292 "4030: Incorrect multipath connection"},
Brian King933916f2007-03-29 12:43:30 -0500293 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500294 "4110: Unsupported enclosure function"},
Brian King933916f2007-03-29 12:43:30 -0500295 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 "FFF4: Command to logical unit failed"},
297 {0x05240000, 1, 0,
298 "Illegal request, invalid request type or request packet"},
299 {0x05250000, 0, 0,
300 "Illegal request, invalid resource handle"},
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600301 {0x05258000, 0, 0,
302 "Illegal request, commands not allowed to this device"},
303 {0x05258100, 0, 0,
304 "Illegal request, command not allowed to a secondary adapter"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 {0x05260000, 0, 0,
306 "Illegal request, invalid field in parameter list"},
307 {0x05260100, 0, 0,
308 "Illegal request, parameter not supported"},
309 {0x05260200, 0, 0,
310 "Illegal request, parameter value invalid"},
311 {0x052C0000, 0, 0,
312 "Illegal request, command sequence error"},
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600313 {0x052C8000, 1, 0,
314 "Illegal request, dual adapter support not enabled"},
Brian King933916f2007-03-29 12:43:30 -0500315 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 "9031: Array protection temporarily suspended, protection resuming"},
Brian King933916f2007-03-29 12:43:30 -0500317 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 "9040: Array protection temporarily suspended, protection resuming"},
Brian King933916f2007-03-29 12:43:30 -0500319 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500320 "3140: Device bus not ready to ready transition"},
Brian King933916f2007-03-29 12:43:30 -0500321 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 "FFFB: SCSI bus was reset"},
323 {0x06290500, 0, 0,
324 "FFFE: SCSI bus transition to single ended"},
325 {0x06290600, 0, 0,
326 "FFFE: SCSI bus transition to LVD"},
Brian King933916f2007-03-29 12:43:30 -0500327 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328 "FFFB: SCSI bus was reset by another initiator"},
Brian King933916f2007-03-29 12:43:30 -0500329 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 "3029: A device replacement has occurred"},
Brian King933916f2007-03-29 12:43:30 -0500331 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 "9051: IOA cache data exists for a missing or failed device"},
Brian King933916f2007-03-29 12:43:30 -0500333 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600334 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
Brian King933916f2007-03-29 12:43:30 -0500335 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 "9025: Disk unit is not supported at its physical location"},
Brian King933916f2007-03-29 12:43:30 -0500337 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 "3020: IOA detected a SCSI bus configuration error"},
Brian King933916f2007-03-29 12:43:30 -0500339 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 "3150: SCSI bus configuration error"},
Brian King933916f2007-03-29 12:43:30 -0500341 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600342 "9074: Asymmetric advanced function disk configuration"},
Brian King933916f2007-03-29 12:43:30 -0500343 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500344 "4040: Incomplete multipath connection between IOA and enclosure"},
Brian King933916f2007-03-29 12:43:30 -0500345 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500346 "4041: Incomplete multipath connection between enclosure and device"},
Brian King933916f2007-03-29 12:43:30 -0500347 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500348 "9075: Incomplete multipath connection between IOA and remote IOA"},
Brian King933916f2007-03-29 12:43:30 -0500349 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500350 "9076: Configuration error, missing remote IOA"},
Brian King933916f2007-03-29 12:43:30 -0500351 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500352 "4050: Enclosure does not support a required multipath function"},
Brian King933916f2007-03-29 12:43:30 -0500353 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 "9041: Array protection temporarily suspended"},
Brian King933916f2007-03-29 12:43:30 -0500355 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 "9042: Corrupt array parity detected on specified device"},
Brian King933916f2007-03-29 12:43:30 -0500357 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358 "9030: Array no longer protected due to missing or failed disk unit"},
Brian King933916f2007-03-29 12:43:30 -0500359 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600360 "9071: Link operational transition"},
Brian King933916f2007-03-29 12:43:30 -0500361 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -0600362 "9072: Link not operational transition"},
Brian King933916f2007-03-29 12:43:30 -0500363 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364 "9032: Array exposed but still protected"},
Brian Kinge4353402007-03-29 12:43:37 -0500365 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
366 "70DD: Device forced failed by disrupt device command"},
Brian King933916f2007-03-29 12:43:30 -0500367 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500368 "4061: Multipath redundancy level got better"},
Brian King933916f2007-03-29 12:43:30 -0500369 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
Brian King896bbd22006-08-02 14:57:44 -0500370 "4060: Multipath redundancy level got worse"},
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 {0x07270000, 0, 0,
372 "Failure due to other device"},
Brian King933916f2007-03-29 12:43:30 -0500373 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374 "9008: IOA does not support functions expected by devices"},
Brian King933916f2007-03-29 12:43:30 -0500375 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 "9010: Cache data associated with attached devices cannot be found"},
Brian King933916f2007-03-29 12:43:30 -0500377 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 "9011: Cache data belongs to devices other than those attached"},
Brian King933916f2007-03-29 12:43:30 -0500379 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 "9020: Array missing 2 or more devices with only 1 device present"},
Brian King933916f2007-03-29 12:43:30 -0500381 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382 "9021: Array missing 2 or more devices with 2 or more devices present"},
Brian King933916f2007-03-29 12:43:30 -0500383 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384 "9022: Exposed array is missing a required device"},
Brian King933916f2007-03-29 12:43:30 -0500385 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386 "9023: Array member(s) not at required physical locations"},
Brian King933916f2007-03-29 12:43:30 -0500387 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 "9024: Array not functional due to present hardware configuration"},
Brian King933916f2007-03-29 12:43:30 -0500389 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 "9026: Array not functional due to present hardware configuration"},
Brian King933916f2007-03-29 12:43:30 -0500391 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 "9027: Array is missing a device and parity is out of sync"},
Brian King933916f2007-03-29 12:43:30 -0500393 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 "9028: Maximum number of arrays already exist"},
Brian King933916f2007-03-29 12:43:30 -0500395 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 "9050: Required cache data cannot be located for a disk unit"},
Brian King933916f2007-03-29 12:43:30 -0500397 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398 "9052: Cache data exists for a device that has been modified"},
Brian King933916f2007-03-29 12:43:30 -0500399 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 "9054: IOA resources not available due to previous problems"},
Brian King933916f2007-03-29 12:43:30 -0500401 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 "9092: Disk unit requires initialization before use"},
Brian King933916f2007-03-29 12:43:30 -0500403 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 "9029: Incorrect hardware configuration change has been detected"},
Brian King933916f2007-03-29 12:43:30 -0500405 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 "9060: One or more disk pairs are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500407 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 "9061: One or more disks are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500409 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 "9062: One or more disks are missing from an array"},
Brian King933916f2007-03-29 12:43:30 -0500411 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 "9063: Maximum number of functional arrays has been exceeded"},
413 {0x0B260000, 0, 0,
414 "Aborted command, invalid descriptor"},
415 {0x0B5A0000, 0, 0,
416 "Command terminated by host"}
417};
418
419static const struct ipr_ses_table_entry ipr_ses_table[] = {
420 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
421 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
422 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
423 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
424 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
425 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
426 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
427 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
428 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
429 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
430 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
431 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
432 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
433};
434
/*
 * Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd);
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd);
static void ipr_process_error(struct ipr_cmnd *ipr_cmd);
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type);
444
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Records the command opcode, ATA opcode, command index, resource
 * handle and a caller-supplied datum into the next slot of the
 * adapter's trace ring (trace_index post-increments; wrap behavior
 * depends on the type of trace_index in ipr.h).
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_trace_entry *entry;

	entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	entry->time = jiffies;
	entry->type = type;
	entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
	entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	entry->res_handle = ipr_cmd->ioarcb.res_handle;
	entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
473
474/**
475 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
476 * @ipr_cmd: ipr command struct
477 *
478 * Return value:
479 * none
480 **/
481static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
482{
483 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
484 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
Brian King51b1c7e2007-03-29 12:43:50 -0500485 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486
487 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
488 ioarcb->write_data_transfer_length = 0;
489 ioarcb->read_data_transfer_length = 0;
490 ioarcb->write_ioadl_len = 0;
491 ioarcb->read_ioadl_len = 0;
Brian King51b1c7e2007-03-29 12:43:50 -0500492 ioarcb->write_ioadl_addr =
493 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
494 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 ioasa->ioasc = 0;
496 ioasa->residual_data_len = 0;
Brian King35a39692006-09-25 12:39:20 -0500497 ioasa->u.gata.status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498
499 ipr_cmd->scsi_cmd = NULL;
Brian King35a39692006-09-25 12:39:20 -0500500 ipr_cmd->qc = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 ipr_cmd->sense_buffer[0] = 0;
502 ipr_cmd->dma_use_sg = 0;
503}
504
505/**
506 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
507 * @ipr_cmd: ipr command struct
508 *
509 * Return value:
510 * none
511 **/
512static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
513{
514 ipr_reinit_ipr_cmnd(ipr_cmd);
515 ipr_cmd->u.scratch = 0;
516 ipr_cmd->sibling = NULL;
517 init_timer(&ipr_cmd->timer);
518}
519
520/**
521 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
522 * @ioa_cfg: ioa config struct
523 *
524 * Return value:
525 * pointer to ipr command struct
526 **/
527static
528struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
529{
530 struct ipr_cmnd *ipr_cmd;
531
532 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
533 list_del(&ipr_cmd->queue);
534 ipr_init_ipr_cmnd(ipr_cmd);
535
536 return ipr_cmd;
537}
538
539/**
540 * ipr_unmap_sglist - Unmap scatterlist if mapped
541 * @ioa_cfg: ioa config struct
542 * @ipr_cmd: ipr command struct
543 *
544 * Return value:
545 * nothing
546 **/
547static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
548 struct ipr_cmnd *ipr_cmd)
549{
550 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
551
552 if (ipr_cmd->dma_use_sg) {
553 if (scsi_cmd->use_sg > 0) {
554 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
555 scsi_cmd->use_sg,
556 scsi_cmd->sc_data_direction);
557 } else {
558 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
559 scsi_cmd->request_bufflen,
560 scsi_cmd->sc_data_direction);
561 }
562 }
563}
564
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	/* Read back to flush the posted writes above before returning */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
591
592/**
593 * ipr_save_pcix_cmd_reg - Save PCI-X command register
594 * @ioa_cfg: ioa config struct
595 *
596 * Return value:
597 * 0 on success / -EIO on failure
598 **/
599static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
600{
601 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
602
Brian King7dce0e12007-01-23 11:25:30 -0600603 if (pcix_cmd_reg == 0)
604 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605
606 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
607 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
608 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
609 return -EIO;
610 }
611
612 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
613 return 0;
614}
615
616/**
617 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
618 * @ioa_cfg: ioa config struct
619 *
620 * Return value:
621 * 0 on success / -EIO on failure
622 **/
623static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
624{
625 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
626
627 if (pcix_cmd_reg) {
628 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
629 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
630 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
631 return -EIO;
632 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633 }
634
635 return 0;
636}
637
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	/* Flag the qc as failed so libata EH treats it as errored */
	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	/* Return the command block to the free pool before completing the qc */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
659
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	/* DMA must be unmapped before handing the command back to the midlayer */
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
681
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops. Each pending command is
 * completed with an "IOA was reset" IOASC; commands owned by the SCSI
 * or SATA midlayers are rerouted to the matching EH done handler first.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	/* _safe variant: the done handlers re-queue entries as we walk */
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		/* Kill the command's timeout before completing it */
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
714
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Ensure the IOARCB is globally visible before ringing the doorbell */
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
750
751/**
752 * ipr_internal_cmd_done - Op done function for an internally generated op.
753 * @ipr_cmd: ipr command struct
754 *
755 * This function is the op done function for an internally generated,
756 * blocking op. It simply wakes the sleeping thread.
757 *
758 * Return value:
759 * none
760 **/
761static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
762{
763 if (ipr_cmd->sibling)
764 ipr_cmd->sibling = NULL;
765 else
766 complete(&ipr_cmd->completion);
767}
768
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout (in jiffies)
 *
 * Must be entered with the host lock held; the lock is dropped while
 * sleeping on the completion and reacquired before returning.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Drop the host lock so the interrupt handler can complete the op */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
791
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB: opcode, subtype, and 16-bit length in cdb[7..8] */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		/* Single IOADL entry: the adapter DMAs the HCAM into the hostrcb */
		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		/* Make the IOARCB visible before ringing the doorbell */
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		/* HCAMs not allowed (e.g. during reset): park the hostrcb */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
846
847/**
848 * ipr_init_res_entry - Initialize a resource entry struct.
849 * @res: resource entry struct
850 *
851 * Return value:
852 * none
853 **/
854static void ipr_init_res_entry(struct ipr_resource_entry *res)
855{
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -0600856 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 res->in_erp = 0;
858 res->add_to_ml = 0;
859 res->del_from_ml = 0;
860 res->resetting_device = 0;
861 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -0500862 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863}
864
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Matches the reported resource address against the used-resource queue;
 * allocates a new entry for unknown resources, and schedules mid-layer
 * add/remove work as appropriate. Always re-arms the CCN HCAM on exit.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;	/* "new device" until a matching res_addr is found */

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		/* Out of resource entries: just re-arm the HCAM and bail */
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			/* Still attached to a SCSI device: defer removal to work_q */
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
924
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		/* A reset-failed HCAM is expected; anything else is logged */
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		/* Re-arm the HCAM without processing the (failed) payload */
		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
954
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index of the last character to consider in @buf
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 * Note: a leading character at index 0 is never stripped, even if
 * it is a space.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	/* Walk back over trailing spaces (but never past index 0) */
	for (; i > 0 && buf[i] == ' '; i--)
		;
	buf[++i] = ' ';
	buf[++i] = '\0';
	return i;
}
974
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Builds "vendor product sn" in a single buffer, stripping trailing
 * whitespace from each field, and logs it on one line.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	/* +3: one pad space + NUL per strip, plus final NUL */
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;	/* running write offset into buffer */

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
1001
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Logs the vendor/product ID on one line and the serial number on a
 * second line. The fixed-length VPD fields are not NUL-terminated, so
 * they are copied into a scratch buffer and terminated before printing.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	/* Reuse the same scratch buffer for the serial number */
	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
1024
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Logs the base VPD on one line, then the 64-bit WWN on a second line.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
1041
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Logs the base VPD, then the 64-bit WWN (stored as two big-endian words).
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
1055
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 12 (enhanced cache) error: current vs. expected
 * cache-directory/adapter pairing, plus raw IOA data words.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
1087
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 02 (cache) error — the non-enhanced counterpart of
 * ipr_log_enhanced_cache_error(), using plain (no-WWN) VPD.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
1119
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 13 (enhanced configuration) error: one block of extended
 * VPD per affected device.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	/*
	 * NOTE(review): errors_logged comes straight from the adapter and is
	 * not clamped against the capacity of error->dev[] — confirm the
	 * firmware contract bounds it, or clamp as done for type 14 errors.
	 */
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
1159
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 03 (configuration) error — the non-enhanced counterpart
 * of ipr_log_enhanced_config_error(), with extra raw IOA data per device.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	/*
	 * NOTE(review): errors_logged is adapter-supplied and not clamped
	 * against the capacity of error->dev[] — verify the firmware bounds it.
	 */
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
1206
1207/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001208 * ipr_log_enhanced_array_error - Log an array configuration error.
1209 * @ioa_cfg: ioa config struct
1210 * @hostrcb: hostrcb struct
1211 *
1212 * Return value:
1213 * none
1214 **/
1215static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1216 struct ipr_hostrcb *hostrcb)
1217{
1218 int i, num_entries;
1219 struct ipr_hostrcb_type_14_error *error;
1220 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1221 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1222
1223 error = &hostrcb->hcam.u.error.u.type_14_error;
1224
1225 ipr_err_separator;
1226
1227 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1228 error->protection_level,
1229 ioa_cfg->host->host_no,
1230 error->last_func_vset_res_addr.bus,
1231 error->last_func_vset_res_addr.target,
1232 error->last_func_vset_res_addr.lun);
1233
1234 ipr_err_separator;
1235
1236 array_entry = error->array_member;
1237 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1238 sizeof(error->array_member));
1239
1240 for (i = 0; i < num_entries; i++, array_entry++) {
1241 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1242 continue;
1243
1244 if (be32_to_cpu(error->exposed_mode_adn) == i)
1245 ipr_err("Exposed Array Member %d:\n", i);
1246 else
1247 ipr_err("Array Member %d:\n", i);
1248
1249 ipr_log_ext_vpd(&array_entry->vpd);
1250 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1251 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1252 "Expected Location");
1253
1254 ipr_err_separator;
1255 }
1256}
1257
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 04 (array) error. The hostrcb layout splits the 18
 * member entries across two fixed arrays (array_member / array_member2),
 * hence the pointer switch at index 9.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		/* An all-'0' serial number marks an unused slot */
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		/* Entries 0-9 live in array_member[], 10-17 in array_member2[] */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
1312
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data (array of big-endian 32-bit words)
 * @len:	data length in bytes
 *
 * Dumps the data as rows of four 32-bit words, prefixed with the byte
 * offset. At default log levels the dump is capped to
 * IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	/*
	 * i indexes 32-bit words; each iteration prints one 4-word row.
	 * NOTE(review): if len/4 is not a multiple of 4, the final row reads
	 * up to 3 words past len — presumably the hostrcb buffer is large
	 * enough and callers pass 16-byte-multiple lengths; confirm.
	 */
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
1340
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 17 error: failure reason string, remote IOA VPD, and a
 * hex dump of whatever error data follows the fixed header.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	/* Force termination before string ops on the adapter-supplied text */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Dump only the bytes past the fixed part of the type 17 record */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
1366
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs a type 07 error — the non-enhanced counterpart of
 * ipr_log_enhanced_dual_ioa_error(), using plain (no-WWN) VPD.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	/* Force termination before string ops on the adapter-supplied text */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strstrip(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Dump only the bytes past the fixed part of the type 07 record */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
1392
/* Human-readable names for the path-active bits of a fabric path_state */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

/* Human-readable names for the path-state bits of a fabric path_state */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
1411
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Decodes the active/state bits of the fabric path descriptor via the
 * lookup tables above and logs a one-line description. 0xff in the
 * cascade/phy fields means "not applicable" and is omitted from the
 * output. Falls back to a raw dump if the bits match no table entry.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/* Pick the format matching which fields are valid (!= 0xff) */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* Unrecognized bit combination: dump the raw values */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
1460
/* Human-readable names for the type bits of a fabric config element */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

/* Human-readable names for the status bits of a fabric config element */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

/* SAS link rate strings, indexed by the 4-bit link_rate field */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
1501
1502/**
1503 * ipr_log_path_elem - Log a fabric path element.
1504 * @hostrcb: hostrcb struct
1505 * @cfg: fabric path element struct
1506 *
1507 * Return value:
1508 * none
1509 **/
1510static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1511 struct ipr_hostrcb_config_element *cfg)
1512{
1513 int i, j;
1514 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1515 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1516
1517 if (type == IPR_PATH_CFG_NOT_EXIST)
1518 return;
1519
1520 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1521 if (path_type_desc[i].type != type)
1522 continue;
1523
1524 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1525 if (path_status_desc[j].status != status)
1526 continue;
1527
1528 if (type == IPR_PATH_CFG_IOA_PORT) {
1529 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1530 path_status_desc[j].desc, path_type_desc[i].desc,
1531 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1532 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1533 } else {
1534 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1535 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1536 path_status_desc[j].desc, path_type_desc[i].desc,
1537 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1538 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1539 } else if (cfg->cascaded_expander == 0xff) {
1540 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1541 "WWN=%08X%08X\n", path_status_desc[j].desc,
1542 path_type_desc[i].desc, cfg->phy,
1543 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1544 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1545 } else if (cfg->phy == 0xff) {
1546 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1547 "WWN=%08X%08X\n", path_status_desc[j].desc,
1548 path_type_desc[i].desc, cfg->cascaded_expander,
1549 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1550 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1551 } else {
1552 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1553 "WWN=%08X%08X\n", path_status_desc[j].desc,
1554 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1555 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1556 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1557 }
1558 }
1559 return;
1560 }
1561 }
1562
1563 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1564 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1565 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1566 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1567}
1568
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason from an overlay ID 20 HCAM, then walks the
 * variable-length list of fabric descriptors, logging each fabric path
 * and every config element it contains. Whatever trailing data remains
 * after the descriptors is hex dumped.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	/* Force NUL termination in case the adapter filled the whole field */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Bytes of payload following the fixed part of the overlay */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		/* Descriptors are variable length; advance by each one's
		   self-declared length and track how much payload is left */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Hex dump whatever data follows the last fabric descriptor */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
1605
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001606/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 * ipr_log_generic_error - Log an adapter error.
1608 * @ioa_cfg: ioa config struct
1609 * @hostrcb: hostrcb struct
1610 *
1611 * Return value:
1612 * none
1613 **/
1614static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1615 struct ipr_hostrcb *hostrcb)
1616{
Brian Kingac719ab2006-11-21 10:28:42 -06001617 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001618 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619}
1620
1621/**
1622 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1623 * @ioasc: IOASC
1624 *
1625 * This function will return the index of into the ipr_error_table
1626 * for the specified IOASC. If the IOASC is not in the table,
1627 * 0 will be returned, which points to the entry used for unknown errors.
1628 *
1629 * Return value:
1630 * index into the ipr_error_table
1631 **/
1632static u32 ipr_get_error(u32 ioasc)
1633{
1634 int i;
1635
1636 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05001637 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 return i;
1639
1640 return 0;
1641}
1642
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system. It decodes the
 * failing device IOASC, reports bus resets to the SCSI midlayer, and
 * dispatches on the HCAM overlay ID to the matching decode routine.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	/* Only error log entries are handled here */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	/* log_hcam == 0 means this IOASC should not be logged at all */
	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Skip the detailed decode if the log level is below this error's */
	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	/* Clamp a bogus adapter-reported length to the raw buffer size */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		/* Unknown overlay: dump the raw HCAM data */
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
1726
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	/* Detach the hostrcb and recycle the command before logging */
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		/* The adapter asked for a reset via the failing device IOASC */
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		/* IOA_WAS_RESET is expected during reset; anything else is a failure */
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	/* Return the HCAM buffer to the adapter for the next notification */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
1759
1760/**
1761 * ipr_timeout - An internally generated op has timed out.
1762 * @ipr_cmd: ipr command struct
1763 *
1764 * This function blocks host requests and initiates an
1765 * adapter reset.
1766 *
1767 * Return value:
1768 * none
1769 **/
1770static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1771{
1772 unsigned long lock_flags = 0;
1773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1774
1775 ENTER;
1776 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1777
1778 ioa_cfg->errors_logged++;
1779 dev_err(&ioa_cfg->pdev->dev,
1780 "Adapter being reset due to command timeout.\n");
1781
1782 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1783 ioa_cfg->sdt_state = GET_DUMP;
1784
1785 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1786 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1787
1788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1789 LEAVE;
1790}
1791
1792/**
1793 * ipr_oper_timeout - Adapter timed out transitioning to operational
1794 * @ipr_cmd: ipr command struct
1795 *
1796 * This function blocks host requests and initiates an
1797 * adapter reset.
1798 *
1799 * Return value:
1800 * none
1801 **/
1802static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1803{
1804 unsigned long lock_flags = 0;
1805 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1806
1807 ENTER;
1808 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1809
1810 ioa_cfg->errors_logged++;
1811 dev_err(&ioa_cfg->pdev->dev,
1812 "Adapter timed out transitioning to operational.\n");
1813
1814 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1815 ioa_cfg->sdt_state = GET_DUMP;
1816
1817 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1818 if (ipr_fastfail)
1819 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1820 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1821 }
1822
1823 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1824 LEAVE;
1825}
1826
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Caller must hold the host lock; it is dropped while waiting for the
 * reset to complete and reacquired before returning.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	/* Only kick off a new reset if one is not already in progress */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	/* Drop the lock so the reset job can run, then wait for it */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
1856
1857/**
1858 * ipr_find_ses_entry - Find matching SES in SES table
1859 * @res: resource entry struct of SES
1860 *
1861 * Return value:
1862 * pointer to SES table entry / NULL on failure
1863 **/
1864static const struct ipr_ses_table_entry *
1865ipr_find_ses_entry(struct ipr_resource_entry *res)
1866{
1867 int i, j, matches;
1868 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1869
1870 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1871 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1872 if (ste->compare_product_id_byte[j] == 'X') {
1873 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1874 matches++;
1875 else
1876 break;
1877 } else
1878 matches++;
1879 }
1880
1881 if (matches == IPR_PROD_ID_LEN)
1882 return ste;
1883 }
1884
1885 return NULL;
1886}
1887
1888/**
1889 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1890 * @ioa_cfg: ioa config struct
1891 * @bus: SCSI bus
1892 * @bus_width: bus width
1893 *
1894 * Return value:
1895 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1896 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1897 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1898 * max 160MHz = max 320MB/sec).
1899 **/
1900static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1901{
1902 struct ipr_resource_entry *res;
1903 const struct ipr_ses_table_entry *ste;
1904 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1905
1906 /* Loop through each config table entry in the config table buffer */
1907 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1908 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1909 continue;
1910
1911 if (bus != res->cfgte.res_addr.bus)
1912 continue;
1913
1914 if (!(ste = ipr_find_ses_entry(res)))
1915 continue;
1916
1917 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1918 }
1919
1920 return max_xfer_rate;
1921}
1922
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 * The delay between polls doubles each iteration (exponential backoff)
 * until it would exceed @max_delay.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		/* Exponential backoff: double the poll interval each pass */
		delay += delay;
	}
	return -EIO;
}
1955
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Reads adapter memory one word at a time through the mailbox register
 * using the LDUMP handshake protocol. The order of register writes below
 * implements that protocol and must not be changed.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	/* Timed out waiting for LDUMP exit; data was still transferred */
	return 0;
}
2042
2043#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Data is accumulated into a list of free pages in the ioa_dump struct,
 * allocating a new page whenever the current one fills. The host lock
 * is held only around each section transfer so an ABORT_DUMP request
 * can interrupt the copy between sections.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		/* Start a fresh page when the current one is full or none exists */
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				/* Out of memory: return what we copied so far */
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		/* Copy no more than fits in both the request and the page */
		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		/* Yield between sections; dumps can be long */
		schedule();
	}

	return bytes_copied;
}
2108
2109/**
2110 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2111 * @hdr: dump entry header struct
2112 *
2113 * Return value:
2114 * nothing
2115 **/
2116static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2117{
2118 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2119 hdr->num_elems = 1;
2120 hdr->offset = sizeof(*hdr);
2121 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2122}
2123
2124/**
2125 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2126 * @ioa_cfg: ioa config struct
2127 * @driver_dump: driver dump struct
2128 *
2129 * Return value:
2130 * nothing
2131 **/
2132static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2133 struct ipr_driver_dump *driver_dump)
2134{
2135 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2136
2137 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2138 driver_dump->ioa_type_entry.hdr.len =
2139 sizeof(struct ipr_dump_ioa_type_entry) -
2140 sizeof(struct ipr_dump_entry_header);
2141 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2142 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2143 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2144 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2145 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2146 ucode_vpd->minor_release[1];
2147 driver_dump->hdr.num_entries++;
2148}
2149
2150/**
2151 * ipr_dump_version_data - Fill in the driver version in the dump.
2152 * @ioa_cfg: ioa config struct
2153 * @driver_dump: driver dump struct
2154 *
2155 * Return value:
2156 * nothing
2157 **/
2158static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2159 struct ipr_driver_dump *driver_dump)
2160{
2161 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2162 driver_dump->version_entry.hdr.len =
2163 sizeof(struct ipr_dump_version_entry) -
2164 sizeof(struct ipr_dump_entry_header);
2165 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2166 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2167 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2168 driver_dump->hdr.num_entries++;
2169}
2170
2171/**
2172 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2173 * @ioa_cfg: ioa config struct
2174 * @driver_dump: driver dump struct
2175 *
2176 * Return value:
2177 * nothing
2178 **/
2179static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2180 struct ipr_driver_dump *driver_dump)
2181{
2182 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2183 driver_dump->trace_entry.hdr.len =
2184 sizeof(struct ipr_dump_trace_entry) -
2185 sizeof(struct ipr_dump_entry_header);
2186 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2187 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2188 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2189 driver_dump->hdr.num_entries++;
2190}
2191
2192/**
2193 * ipr_dump_location_data - Fill in the IOA location in the dump.
2194 * @ioa_cfg: ioa config struct
2195 * @driver_dump: driver dump struct
2196 *
2197 * Return value:
2198 * nothing
2199 **/
2200static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2201 struct ipr_driver_dump *driver_dump)
2202{
2203 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2204 driver_dump->location_entry.hdr.len =
2205 sizeof(struct ipr_dump_location_entry) -
2206 sizeof(struct ipr_dump_entry_header);
2207 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2208 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2209 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
2210 driver_dump->hdr.num_entries++;
2211}
2212
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:		dump struct
 *
 * Builds the driver portion of the dump (version, location, type, trace),
 * then fetches the adapter's Smart Dump Table through the mailbox and
 * copies each valid SDT entry's data into the dump buffers. On completion
 * sdt_state is set to DUMP_OBTAINED. Only runs when sdt_state == GET_DUMP.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	/* The mailbox holds the address of the adapter's SDT */
	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data. sdt represents the pointer
	 to the ioa generated dump table. Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	/* Drop the lock for the (slow) data copy; ipr_sdt_copy relocks
	   around each section transfer */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				/* Skip implausibly large sections */
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				/* Short copy means abort/OOM/error: stop here */
				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	/* Ensure the dump data is visible before publishing DUMP_OBTAINED */
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}
2344
#else
/* Dump support compiled out: stub that discards its arguments and does nothing. */
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
2348
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * kref release callback: detaches the dump from the ioa config,
 * resets the dump state machine, then frees all captured pages and
 * the dump struct itself.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Free every page allocated by ipr_sdt_copy */
	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}
2375
/**
 * ipr_worker_thread - Worker thread
 * @work:		ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter. It also collects a pending
 * adapter dump when sdt_state == GET_DUMP.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		/* Hold a reference across the unlocked dump collection */
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	/* First pass: remove devices, one per iteration, since the lock
	   must be dropped around scsi_remove_device() */
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				/* List may have changed while unlocked: rescan */
				break;
			}
		}
	} while(did_work);

	/* Second pass: add new devices; restart the whole scan after each
	   add since the lock is dropped around scsi_add_device() */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->cfgte.res_addr.bus;
			target = res->cfgte.res_addr.target;
			lun = res->cfgte.res_addr.lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Notify userspace that the device configuration changed */
	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
	LEAVE;
}
2460
2461#ifdef CONFIG_SCSI_IPR_TRACE
2462/**
2463 * ipr_read_trace - Dump the adapter trace
2464 * @kobj: kobject struct
2465 * @buf: buffer
2466 * @off: offset
2467 * @count: buffer size
2468 *
2469 * Return value:
2470 * number of bytes printed to buffer
2471 **/
2472static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2473 loff_t off, size_t count)
2474{
2475 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2476 struct Scsi_Host *shost = class_to_shost(cdev);
2477 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2478 unsigned long lock_flags = 0;
2479 int size = IPR_TRACE_SIZE;
2480 char *src = (char *)ioa_cfg->trace;
2481
2482 if (off > size)
2483 return 0;
2484 if (off + count > size) {
2485 size -= off;
2486 count = size;
2487 }
2488
2489 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2490 memcpy(buf, &src[off], count);
2491 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2492 return count;
2493}
2494
/* sysfs binary attribute "trace": read-only dump of the adapter trace buffer */
static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
2503#endif
2504
brking@us.ibm.com62275042005-11-01 17:01:14 -06002505static const struct {
2506 enum ipr_cache_state state;
2507 char *name;
2508} cache_state [] = {
2509 { CACHE_NONE, "none" },
2510 { CACHE_DISABLED, "disabled" },
2511 { CACHE_ENABLED, "enabled" }
2512};
2513
2514/**
2515 * ipr_show_write_caching - Show the write caching attribute
2516 * @class_dev: class device struct
2517 * @buf: buffer
2518 *
2519 * Return value:
2520 * number of bytes printed to buffer
2521 **/
2522static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2523{
2524 struct Scsi_Host *shost = class_to_shost(class_dev);
2525 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2526 unsigned long lock_flags = 0;
2527 int i, len = 0;
2528
2529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2530 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2531 if (cache_state[i].state == ioa_cfg->cache_state) {
2532 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2533 break;
2534 }
2535 }
2536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2537 return len;
2538}
2539
2540
2541/**
2542 * ipr_store_write_caching - Enable/disable adapter write cache
2543 * @class_dev: class_device struct
2544 * @buf: buffer
2545 * @count: buffer size
2546 *
2547 * This function will enable/disable adapter write cache.
2548 *
2549 * Return value:
2550 * count on success / other on failure
2551 **/
2552static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2553 const char *buf, size_t count)
2554{
2555 struct Scsi_Host *shost = class_to_shost(class_dev);
2556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2557 unsigned long lock_flags = 0;
2558 enum ipr_cache_state new_state = CACHE_INVALID;
2559 int i;
2560
2561 if (!capable(CAP_SYS_ADMIN))
2562 return -EACCES;
2563 if (ioa_cfg->cache_state == CACHE_NONE)
2564 return -EINVAL;
2565
2566 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2567 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2568 new_state = cache_state[i].state;
2569 break;
2570 }
2571 }
2572
2573 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2574 return -EINVAL;
2575
2576 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2577 if (ioa_cfg->cache_state == new_state) {
2578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2579 return count;
2580 }
2581
2582 ioa_cfg->cache_state = new_state;
2583 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2584 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2585 if (!ioa_cfg->in_reset_reload)
2586 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2587 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2588 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2589
2590 return count;
2591}
2592
/* sysfs attribute "write_cache": show/change the adapter write cache state */
static struct class_device_attribute ipr_ioa_cache_attr = {
	.attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
};
2601
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602/**
2603 * ipr_show_fw_version - Show the firmware version
2604 * @class_dev: class device struct
2605 * @buf: buffer
2606 *
2607 * Return value:
2608 * number of bytes printed to buffer
2609 **/
2610static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2611{
2612 struct Scsi_Host *shost = class_to_shost(class_dev);
2613 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2614 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2615 unsigned long lock_flags = 0;
2616 int len;
2617
2618 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2619 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2620 ucode_vpd->major_release, ucode_vpd->card_type,
2621 ucode_vpd->minor_release[0],
2622 ucode_vpd->minor_release[1]);
2623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2624 return len;
2625}
2626
/* sysfs attribute "fw_version": read-only adapter microcode level */
static struct class_device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
2634
2635/**
2636 * ipr_show_log_level - Show the adapter's error logging level
2637 * @class_dev: class device struct
2638 * @buf: buffer
2639 *
2640 * Return value:
2641 * number of bytes printed to buffer
2642 **/
2643static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2644{
2645 struct Scsi_Host *shost = class_to_shost(class_dev);
2646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2647 unsigned long lock_flags = 0;
2648 int len;
2649
2650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2651 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2652 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2653 return len;
2654}
2655
2656/**
2657 * ipr_store_log_level - Change the adapter's error logging level
2658 * @class_dev: class device struct
2659 * @buf: buffer
2660 *
2661 * Return value:
2662 * number of bytes printed to buffer
2663 **/
2664static ssize_t ipr_store_log_level(struct class_device *class_dev,
2665 const char *buf, size_t count)
2666{
2667 struct Scsi_Host *shost = class_to_shost(class_dev);
2668 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2669 unsigned long lock_flags = 0;
2670
2671 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2672 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2674 return strlen(buf);
2675}
2676
/* sysfs attribute "log_level": show/change adapter error logging verbosity */
static struct class_device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
2685
2686/**
2687 * ipr_store_diagnostics - IOA Diagnostics interface
2688 * @class_dev: class_device struct
2689 * @buf: buffer
2690 * @count: buffer size
2691 *
2692 * This function will reset the adapter and wait a reasonable
2693 * amount of time for any errors that the adapter might log.
2694 *
2695 * Return value:
2696 * count on success / other on failure
2697 **/
2698static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2699 const char *buf, size_t count)
2700{
2701 struct Scsi_Host *shost = class_to_shost(class_dev);
2702 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2703 unsigned long lock_flags = 0;
2704 int rc = count;
2705
2706 if (!capable(CAP_SYS_ADMIN))
2707 return -EACCES;
2708
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05002710 while(ioa_cfg->in_reset_reload) {
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2713 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2714 }
2715
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 ioa_cfg->errors_logged = 0;
2717 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2718
2719 if (ioa_cfg->in_reset_reload) {
2720 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2722
2723 /* Wait for a second for any errors to be logged */
2724 msleep(1000);
2725 } else {
2726 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2727 return -EIO;
2728 }
2729
2730 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2731 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2732 rc = -EIO;
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734
2735 return rc;
2736}
2737
/* sysfs attribute "run_diagnostics": write-only trigger for a diagnostic reset */
static struct class_device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
2745
2746/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06002747 * ipr_show_adapter_state - Show the adapter's state
2748 * @class_dev: class device struct
2749 * @buf: buffer
2750 *
2751 * Return value:
2752 * number of bytes printed to buffer
2753 **/
2754static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2755{
2756 struct Scsi_Host *shost = class_to_shost(class_dev);
2757 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2758 unsigned long lock_flags = 0;
2759 int len;
2760
2761 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2762 if (ioa_cfg->ioa_is_dead)
2763 len = snprintf(buf, PAGE_SIZE, "offline\n");
2764 else
2765 len = snprintf(buf, PAGE_SIZE, "online\n");
2766 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2767 return len;
2768}
2769
2770/**
2771 * ipr_store_adapter_state - Change adapter state
2772 * @class_dev: class_device struct
2773 * @buf: buffer
2774 * @count: buffer size
2775 *
2776 * This function will change the adapter's state.
2777 *
2778 * Return value:
2779 * count on success / other on failure
2780 **/
2781static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2782 const char *buf, size_t count)
2783{
2784 struct Scsi_Host *shost = class_to_shost(class_dev);
2785 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2786 unsigned long lock_flags;
2787 int result = count;
2788
2789 if (!capable(CAP_SYS_ADMIN))
2790 return -EACCES;
2791
2792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2793 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2794 ioa_cfg->ioa_is_dead = 0;
2795 ioa_cfg->reset_retries = 0;
2796 ioa_cfg->in_ioa_bringdown = 0;
2797 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2798 }
2799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2800 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2801
2802 return result;
2803}
2804
/* sysfs attribute "state": show online/offline; write "online" to revive */
static struct class_device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
2813
2814/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 * ipr_store_reset_adapter - Reset the adapter
2816 * @class_dev: class_device struct
2817 * @buf: buffer
2818 * @count: buffer size
2819 *
2820 * This function will reset the adapter.
2821 *
2822 * Return value:
2823 * count on success / other on failure
2824 **/
2825static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2826 const char *buf, size_t count)
2827{
2828 struct Scsi_Host *shost = class_to_shost(class_dev);
2829 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2830 unsigned long lock_flags;
2831 int result = count;
2832
2833 if (!capable(CAP_SYS_ADMIN))
2834 return -EACCES;
2835
2836 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2837 if (!ioa_cfg->in_reset_reload)
2838 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2840 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2841
2842 return result;
2843}
2844
/* sysfs attribute "reset_host": write-only trigger for an adapter reset */
static struct class_device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
2852
2853/**
2854 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2855 * @buf_len: buffer length
2856 *
2857 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2858 * list to use for microcode download
2859 *
2860 * Return value:
2861 * pointer to sglist / NULL on failure
2862 **/
2863static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2864{
2865 int sg_size, order, bsize_elem, num_elem, i, j;
2866 struct ipr_sglist *sglist;
2867 struct scatterlist *scatterlist;
2868 struct page *page;
2869
2870 /* Get the minimum size per scatter/gather element */
2871 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2872
2873 /* Get the actual size per element */
2874 order = get_order(sg_size);
2875
2876 /* Determine the actual number of bytes per element */
2877 bsize_elem = PAGE_SIZE * (1 << order);
2878
2879 /* Determine the actual number of sg entries needed */
2880 if (buf_len % bsize_elem)
2881 num_elem = (buf_len / bsize_elem) + 1;
2882 else
2883 num_elem = buf_len / bsize_elem;
2884
2885 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06002886 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 (sizeof(struct scatterlist) * (num_elem - 1)),
2888 GFP_KERNEL);
2889
2890 if (sglist == NULL) {
2891 ipr_trace;
2892 return NULL;
2893 }
2894
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 scatterlist = sglist->scatterlist;
2896
2897 sglist->order = order;
2898 sglist->num_sg = num_elem;
2899
2900 /* Allocate a bunch of sg elements */
2901 for (i = 0; i < num_elem; i++) {
2902 page = alloc_pages(GFP_KERNEL, order);
2903 if (!page) {
2904 ipr_trace;
2905
2906 /* Free up what we already allocated */
2907 for (j = i - 1; j >= 0; j--)
2908 __free_pages(scatterlist[j].page, order);
2909 kfree(sglist);
2910 return NULL;
2911 }
2912
2913 scatterlist[i].page = page;
2914 }
2915
2916 return sglist;
2917}
2918
2919/**
2920 * ipr_free_ucode_buffer - Frees a microcode download buffer
2921 * @p_dnld: scatter/gather list pointer
2922 *
2923 * Free a DMA'able ucode download buffer previously allocated with
2924 * ipr_alloc_ucode_buffer
2925 *
2926 * Return value:
2927 * nothing
2928 **/
2929static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2930{
2931 int i;
2932
2933 for (i = 0; i < sglist->num_sg; i++)
2934 __free_pages(sglist->scatterlist[i].page, sglist->order);
2935
2936 kfree(sglist);
2937}
2938
2939/**
2940 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2941 * @sglist: scatter/gather list pointer
2942 * @buffer: buffer pointer
2943 * @len: buffer length
2944 *
2945 * Copy a microcode image from a user buffer into a buffer allocated by
2946 * ipr_alloc_ucode_buffer
2947 *
2948 * Return value:
2949 * 0 on success / other on failure
2950 **/
2951static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2952 u8 *buffer, u32 len)
2953{
2954 int bsize_elem, i, result = 0;
2955 struct scatterlist *scatterlist;
2956 void *kaddr;
2957
2958 /* Determine the actual number of bytes per element */
2959 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2960
2961 scatterlist = sglist->scatterlist;
2962
2963 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2964 kaddr = kmap(scatterlist[i].page);
2965 memcpy(kaddr, buffer, bsize_elem);
2966 kunmap(scatterlist[i].page);
2967
2968 scatterlist[i].length = bsize_elem;
2969
2970 if (result != 0) {
2971 ipr_trace;
2972 return result;
2973 }
2974 }
2975
2976 if (len % bsize_elem) {
2977 kaddr = kmap(scatterlist[i].page);
2978 memcpy(kaddr, buffer, len % bsize_elem);
2979 kunmap(scatterlist[i].page);
2980
2981 scatterlist[i].length = len % bsize_elem;
2982 }
2983
2984 sglist->buffer_len = len;
2985 return result;
2986}
2987
2988/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002989 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 * @ipr_cmd: ipr command struct
2991 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002993 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06002996static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2997 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3000 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3001 struct scatterlist *scatterlist = sglist->scatterlist;
3002 int i;
3003
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003004 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003006 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 ioarcb->write_ioadl_len =
3008 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3009
3010 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3011 ioadl[i].flags_and_data_len =
3012 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3013 ioadl[i].address =
3014 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3015 }
3016
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003017 ioadl[i-1].flags_and_data_len |=
3018 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3019}
3020
3021/**
3022 * ipr_update_ioa_ucode - Update IOA's microcode
3023 * @ioa_cfg: ioa config struct
3024 * @sglist: scatter/gather list
3025 *
3026 * Initiate an adapter reset to update the IOA's microcode
3027 *
3028 * Return value:
3029 * 0 on success / -EIO on failure
3030 **/
3031static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3032 struct ipr_sglist *sglist)
3033{
3034 unsigned long lock_flags;
3035
3036 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003037 while(ioa_cfg->in_reset_reload) {
3038 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3039 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3040 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3041 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003042
3043 if (ioa_cfg->ucode_sglist) {
3044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3045 dev_err(&ioa_cfg->pdev->dev,
3046 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 return -EIO;
3048 }
3049
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003050 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3051 sglist->num_sg, DMA_TO_DEVICE);
3052
3053 if (!sglist->num_dma_sg) {
3054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3055 dev_err(&ioa_cfg->pdev->dev,
3056 "Failed to map microcode download buffer!\n");
3057 return -EIO;
3058 }
3059
3060 ioa_cfg->ucode_sglist = sglist;
3061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3064
3065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3066 ioa_cfg->ucode_sglist = NULL;
3067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 return 0;
3069}
3070
3071/**
3072 * ipr_store_update_fw - Update the firmware on the adapter
3073 * @class_dev: class_device struct
3074 * @buf: buffer
3075 * @count: buffer size
3076 *
3077 * This function will update the firmware on the adapter.
3078 *
3079 * Return value:
3080 * count on success / other on failure
3081 **/
3082static ssize_t ipr_store_update_fw(struct class_device *class_dev,
3083 const char *buf, size_t count)
3084{
3085 struct Scsi_Host *shost = class_to_shost(class_dev);
3086 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3087 struct ipr_ucode_image_header *image_hdr;
3088 const struct firmware *fw_entry;
3089 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 char fname[100];
3091 char *src;
3092 int len, result, dnld_size;
3093
3094 if (!capable(CAP_SYS_ADMIN))
3095 return -EACCES;
3096
3097 len = snprintf(fname, 99, "%s", buf);
3098 fname[len-1] = '\0';
3099
3100 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3101 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3102 return -EIO;
3103 }
3104
3105 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3106
3107 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3108 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3109 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3110 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3111 release_firmware(fw_entry);
3112 return -EINVAL;
3113 }
3114
3115 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3116 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3117 sglist = ipr_alloc_ucode_buffer(dnld_size);
3118
3119 if (!sglist) {
3120 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3121 release_firmware(fw_entry);
3122 return -ENOMEM;
3123 }
3124
3125 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3126
3127 if (result) {
3128 dev_err(&ioa_cfg->pdev->dev,
3129 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003130 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 }
3132
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003133 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003135 if (!result)
3136 result = count;
3137out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138 ipr_free_ucode_buffer(sglist);
3139 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003140 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003141}
3142
/* sysfs attribute "update_fw": write a firmware file name to flash the IOA */
static struct class_device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
3150
/* NULL-terminated list of IOA-level sysfs attributes, registered via the
 * SCSI host template's shost_attrs. */
static struct class_device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_cache_attr,
	NULL,
};
3161
3162#ifdef CONFIG_SCSI_IPR_DUMP
3163/**
3164 * ipr_read_dump - Dump the adapter
3165 * @kobj: kobject struct
3166 * @buf: buffer
3167 * @off: offset
3168 * @count: buffer size
3169 *
3170 * Return value:
3171 * number of bytes printed to buffer
3172 **/
3173static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
3174 loff_t off, size_t count)
3175{
3176 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3177 struct Scsi_Host *shost = class_to_shost(cdev);
3178 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3179 struct ipr_dump *dump;
3180 unsigned long lock_flags = 0;
3181 char *src;
3182 int len;
3183 size_t rc = count;
3184
3185 if (!capable(CAP_SYS_ADMIN))
3186 return -EACCES;
3187
3188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3189 dump = ioa_cfg->dump;
3190
3191 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3193 return 0;
3194 }
3195 kref_get(&dump->kref);
3196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3197
3198 if (off > dump->driver_dump.hdr.len) {
3199 kref_put(&dump->kref, ipr_release_dump);
3200 return 0;
3201 }
3202
3203 if (off + count > dump->driver_dump.hdr.len) {
3204 count = dump->driver_dump.hdr.len - off;
3205 rc = count;
3206 }
3207
3208 if (count && off < sizeof(dump->driver_dump)) {
3209 if (off + count > sizeof(dump->driver_dump))
3210 len = sizeof(dump->driver_dump) - off;
3211 else
3212 len = count;
3213 src = (u8 *)&dump->driver_dump + off;
3214 memcpy(buf, src, len);
3215 buf += len;
3216 off += len;
3217 count -= len;
3218 }
3219
3220 off -= sizeof(dump->driver_dump);
3221
3222 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3223 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3224 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3225 else
3226 len = count;
3227 src = (u8 *)&dump->ioa_dump + off;
3228 memcpy(buf, src, len);
3229 buf += len;
3230 off += len;
3231 count -= len;
3232 }
3233
3234 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3235
3236 while (count) {
3237 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3238 len = PAGE_ALIGN(off) - off;
3239 else
3240 len = count;
3241 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3242 src += off & ~PAGE_MASK;
3243 memcpy(buf, src, len);
3244 buf += len;
3245 off += len;
3246 count -= len;
3247 }
3248
3249 kref_put(&dump->kref, ipr_release_dump);
3250 return rc;
3251}
3252
3253/**
3254 * ipr_alloc_dump - Prepare for adapter dump
3255 * @ioa_cfg: ioa config struct
3256 *
3257 * Return value:
3258 * 0 on success / other on failure
3259 **/
3260static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3261{
3262 struct ipr_dump *dump;
3263 unsigned long lock_flags = 0;
3264
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003265 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266
3267 if (!dump) {
3268 ipr_err("Dump memory allocation failed\n");
3269 return -ENOMEM;
3270 }
3271
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272 kref_init(&dump->kref);
3273 dump->ioa_cfg = ioa_cfg;
3274
3275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3276
3277 if (INACTIVE != ioa_cfg->sdt_state) {
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 kfree(dump);
3280 return 0;
3281 }
3282
3283 ioa_cfg->dump = dump;
3284 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3285 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3286 ioa_cfg->dump_taken = 1;
3287 schedule_work(&ioa_cfg->work_q);
3288 }
3289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3290
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291 return 0;
3292}
3293
3294/**
3295 * ipr_free_dump - Free adapter dump memory
3296 * @ioa_cfg: ioa config struct
3297 *
3298 * Return value:
3299 * 0 on success / other on failure
3300 **/
3301static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3302{
3303 struct ipr_dump *dump;
3304 unsigned long lock_flags = 0;
3305
3306 ENTER;
3307
3308 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3309 dump = ioa_cfg->dump;
3310 if (!dump) {
3311 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3312 return 0;
3313 }
3314
3315 ioa_cfg->dump = NULL;
3316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3317
3318 kref_put(&dump->kref, ipr_release_dump);
3319
3320 LEAVE;
3321 return 0;
3322}
3323
3324/**
3325 * ipr_write_dump - Setup dump state of adapter
3326 * @kobj: kobject struct
3327 * @buf: buffer
3328 * @off: offset
3329 * @count: buffer size
3330 *
3331 * Return value:
3332 * number of bytes printed to buffer
3333 **/
3334static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3335 loff_t off, size_t count)
3336{
3337 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3338 struct Scsi_Host *shost = class_to_shost(cdev);
3339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3340 int rc;
3341
3342 if (!capable(CAP_SYS_ADMIN))
3343 return -EACCES;
3344
3345 if (buf[0] == '1')
3346 rc = ipr_alloc_dump(ioa_cfg);
3347 else if (buf[0] == '0')
3348 rc = ipr_free_dump(ioa_cfg);
3349 else
3350 return -EINVAL;
3351
3352 if (rc)
3353 return rc;
3354 else
3355 return count;
3356}
3357
/* sysfs binary attribute "dump": read the captured adapter dump; write
 * '1'/'0' to arm or release it. */
static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
3367#else
/* Stub when CONFIG_SCSI_IPR_DUMP is disabled: there is never a dump to free.
 * (Stray semicolon after the definition removed -- invalid in strict ISO C.) */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3369#endif
3370
3371/**
3372 * ipr_change_queue_depth - Change the device's queue depth
3373 * @sdev: scsi device struct
3374 * @qdepth: depth to set
3375 *
3376 * Return value:
3377 * actual depth set
3378 **/
3379static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3380{
Brian King35a39692006-09-25 12:39:20 -05003381 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3382 struct ipr_resource_entry *res;
3383 unsigned long lock_flags = 0;
3384
3385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3386 res = (struct ipr_resource_entry *)sdev->hostdata;
3387
3388 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3389 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3391
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3393 return sdev->queue_depth;
3394}
3395
3396/**
3397 * ipr_change_queue_type - Change the device's queue type
3398 * @dsev: scsi device struct
3399 * @tag_type: type of tags to use
3400 *
3401 * Return value:
3402 * actual queue type set
3403 **/
3404static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3405{
3406 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3407 struct ipr_resource_entry *res;
3408 unsigned long lock_flags = 0;
3409
3410 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411 res = (struct ipr_resource_entry *)sdev->hostdata;
3412
3413 if (res) {
3414 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3415 /*
3416 * We don't bother quiescing the device here since the
3417 * adapter firmware does it for us.
3418 */
3419 scsi_set_tag_type(sdev, tag_type);
3420
3421 if (tag_type)
3422 scsi_activate_tcq(sdev, sdev->queue_depth);
3423 else
3424 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3425 } else
3426 tag_type = 0;
3427 } else
3428 tag_type = 0;
3429
3430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3431 return tag_type;
3432}
3433
3434/**
3435 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3436 * @dev: device struct
3437 * @buf: buffer
3438 *
3439 * Return value:
3440 * number of bytes printed to buffer
3441 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04003442static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443{
3444 struct scsi_device *sdev = to_scsi_device(dev);
3445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3446 struct ipr_resource_entry *res;
3447 unsigned long lock_flags = 0;
3448 ssize_t len = -ENXIO;
3449
3450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3451 res = (struct ipr_resource_entry *)sdev->hostdata;
3452 if (res)
3453 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 return len;
3456}
3457
/* sysfs device attribute exposing the IOA resource handle, root-readable */
static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
3465
/* NULL-terminated list of per-device sysfs attributes for this driver */
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	NULL,
};
3470
3471/**
3472 * ipr_biosparam - Return the HSC mapping
3473 * @sdev: scsi device struct
3474 * @block_device: block device pointer
3475 * @capacity: capacity of the device
3476 * @parm: Array containing returned HSC values.
3477 *
3478 * This function generates the HSC parms that fdisk uses.
3479 * We want to make sure we return something that places partitions
3480 * on 4k boundaries for best performance with the IOA.
3481 *
3482 * Return value:
3483 * 0 on success
3484 **/
3485static int ipr_biosparam(struct scsi_device *sdev,
3486 struct block_device *block_device,
3487 sector_t capacity, int *parm)
3488{
3489 int heads, sectors;
3490 sector_t cylinders;
3491
3492 heads = 128;
3493 sectors = 32;
3494
3495 cylinders = capacity;
3496 sector_div(cylinders, (128 * 32));
3497
3498 /* return result */
3499 parm[0] = heads;
3500 parm[1] = sectors;
3501 parm[2] = cylinders;
3502
3503 return 0;
3504}
3505
3506/**
Brian King35a39692006-09-25 12:39:20 -05003507 * ipr_find_starget - Find target based on bus/target.
3508 * @starget: scsi target struct
3509 *
3510 * Return value:
3511 * resource entry pointer if found / NULL if not found
3512 **/
3513static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3514{
3515 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3516 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
3517 struct ipr_resource_entry *res;
3518
3519 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3520 if ((res->cfgte.res_addr.bus == starget->channel) &&
3521 (res->cfgte.res_addr.target == starget->id) &&
3522 (res->cfgte.res_addr.lun == 0)) {
3523 return res;
3524 }
3525 }
3526
3527 return NULL;
3528}
3529
3530static struct ata_port_info sata_port_info;
3531
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget: scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 * 	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		/* drop the lock: the GFP_KERNEL allocations below may sleep */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;	/* lock already released */

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			/* retake the lock to publish the new port linkage */
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;	/* lock already released */
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
3580
3581/**
3582 * ipr_target_destroy - Destroy a SCSI target
3583 * @starget: scsi target struct
3584 *
3585 * If the device was a SATA device, this function frees the libata
3586 * ATA port, else it does nothing.
3587 *
3588 **/
3589static void ipr_target_destroy(struct scsi_target *starget)
3590{
3591 struct ipr_sata_port *sata_port = starget->hostdata;
3592
3593 if (sata_port) {
3594 starget->hostdata = NULL;
3595 ata_sas_port_destroy(sata_port->ap);
3596 kfree(sata_port);
3597 }
3598}
3599
3600/**
3601 * ipr_find_sdev - Find device based on bus/target/lun.
3602 * @sdev: scsi device struct
3603 *
3604 * Return value:
3605 * resource entry pointer if found / NULL if not found
3606 **/
3607static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3608{
3609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3610 struct ipr_resource_entry *res;
3611
3612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3613 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3614 (res->cfgte.res_addr.target == sdev->id) &&
3615 (res->cfgte.res_addr.lun == sdev->lun))
3616 return res;
3617 }
3618
3619 return NULL;
3620}
3621
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev: scsi device struct
 *
 * Under the host lock, disables any associated libata port and severs
 * the cross-links between the scsi device and its resource entry.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
3648
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev: scsi device struct
 *
 * This function configures the specified scsi device, applying
 * per-device-class settings (RAID/AF DASD, volume set, SATA).
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		/* advanced function DASD is presented to the midlayer as RAID */
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			/* keep upper-level drivers from binding to internal devices */
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			/* volume sets get a longer timeout and a larger I/O size cap */
			sdev->timeout = IPR_VSET_RW_TIMEOUT;
			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port) {
			/* SATA: bounded ATA queue depth, then hand off to libata */
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, res->sata_port->ap);
		} else {
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
3689
3690/**
Brian King35a39692006-09-25 12:39:20 -05003691 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
3692 * @sdev: scsi device struct
3693 *
3694 * This function initializes an ATA port so that future commands
3695 * sent through queuecommand will work.
3696 *
3697 * Return value:
3698 * 0 on success
3699 **/
3700static int ipr_ata_slave_alloc(struct scsi_device *sdev)
3701{
3702 struct ipr_sata_port *sata_port = NULL;
3703 int rc = -ENXIO;
3704
3705 ENTER;
3706 if (sdev->sdev_target)
3707 sata_port = sdev->sdev_target->hostdata;
3708 if (sata_port)
3709 rc = ata_sas_port_init(sata_port->ap);
3710 if (rc)
3711 ipr_slave_destroy(sdev);
3712
3713 LEAVE;
3714 return rc;
3715}
3716
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev: scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		/* cross-link the device and its resource entry, clear ERP state */
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			/* SATA devices additionally need their libata port set up;
			 * drop the lock first since that path may sleep */
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
3759
3760/**
3761 * ipr_eh_host_reset - Reset the host adapter
3762 * @scsi_cmd: scsi command struct
3763 *
3764 * Return value:
3765 * SUCCESS / FAILED
3766 **/
Jeff Garzik df0ae242005-05-28 07:57:14 -04003767static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768{
3769 struct ipr_ioa_cfg *ioa_cfg;
3770 int rc;
3771
3772 ENTER;
3773 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3774
3775 dev_err(&ioa_cfg->pdev->dev,
3776 "Adapter being reset as a result of error recovery.\n");
3777
3778 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3779 ioa_cfg->sdt_state = GET_DUMP;
3780
3781 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3782
3783 LEAVE;
3784 return rc;
3785}
3786
Jeff Garzik df0ae242005-05-28 07:57:14 -04003787static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3788{
3789 int rc;
3790
3791 spin_lock_irq(cmd->device->host->host_lock);
3792 rc = __ipr_eh_host_reset(cmd);
3793 spin_unlock_irq(cmd->device->host->host_lock);
3794
3795 return rc;
3796}
3797
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg: ioa config struct
 * @res: resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;
	regs = &ioarcb->add_data.u.regs;

	/* build an IOA "Reset Device" command for this resource */
	ioarcb->res_handle = res->cfgte.res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		/* SATA: request a PHY reset and ATA status on good completion */
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	/* save the returned SATA status unless the whole IOA was reset */
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
3846
/**
 * ipr_sata_reset - Reset the SATA port
 * @ap: SATA port to reset
 * @classes: class of the attached device
 * @deadline: timeout deadline (not referenced by this implementation)
 *
 * This function issues a SATA phy reset to the affected ATA port.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* wait out any adapter reset/reload in progress before proceeding */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		/* translate the resource protocol into a libata device class */
		switch(res->cfgte.proto) {
		case IPR_PROTO_SATA:
		case IPR_PROTO_SAS_STP:
			*classes = ATA_DEV_ATA;
			break;
		case IPR_PROTO_SATA_ATAPI:
		case IPR_PROTO_SAS_STP_ATAPI:
			*classes = ATA_DEV_ATAPI;
			break;
		default:
			*classes = ATA_DEV_UNKNOWN;
			break;
		};
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
3896
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd: scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Called with the host lock held (see ipr_eh_dev_reset wrapper).
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	/* redirect completion of every op outstanding to this device into the
	 * eh done paths; mark pending ATA commands failed with a timeout */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		/* let libata EH drive the reset; the host lock must be
		 * dropped around ata_do_eh */
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}
3961
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04003962static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3963{
3964 int rc;
3965
3966 spin_lock_irq(cmd->device->host->host_lock);
3967 rc = __ipr_eh_dev_reset(cmd);
3968 spin_unlock_irq(cmd->device->host->host_lock);
3969
3970 return rc;
3971}
3972
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	/* report the reset to the midlayer for the bus this handle lives on */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
			    sizeof(res->cfgte.res_handle))) {
			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
			break;
		}
	}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}
4008
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd: ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 * 	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* nothing to do if the abort completed or an adapter reset is underway */
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	/* link abort and reset as siblings so ipr_bus_reset_done can tell
	 * whether the abort already finished and wake the eh thread */
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
4048
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd: scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	/* cancel is only attempted for generic SCSI resources */
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	/* check whether the command is actually still outstanding */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	/* build and issue a Cancel All Requests for the resource */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;	/* used by ipr_abort_timeout's printk */

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
4119
4120/**
4121 * ipr_eh_abort - Abort a single op
4122 * @scsi_cmd: scsi command struct
4123 *
4124 * Return value:
4125 * SUCCESS / FAILED
4126 **/
4127static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4128{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004129 unsigned long flags;
4130 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131
4132 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004134 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4135 rc = ipr_cancel_op(scsi_cmd);
4136 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137
4138 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004139 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140}
4141
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg: ioa config struct
 * @int_reg: interrupt register
 *
 * Handles non-HRRQ interrupt conditions: the adapter transitioning to
 * operational state, unit checks, and permanent adapter failures.
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		/* adapter became operational: advance the in-progress reset job */
		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		/* fatal condition: quiesce all interrupts and reset the adapter */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
4182
/**
 * ipr_isr - Interrupt service routine
 * @irq: irq number
 * @devp: pointer to ioa config struct
 *
 * Drains completed commands from the host request response queue (HRRQ)
 * and invokes their done handlers; delegates non-command interrupt
 * conditions to ipr_handle_other_interrupt.
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		/* consume HRRQ entries whose toggle bit matches the current pass */
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			/* an out-of-range handle means the adapter is confused;
			 * log it, capture a dump if requested, and reset */
			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ioa_cfg->errors_logged++;
				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");

				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
					ioa_cfg->sdt_state = GET_DUMP;

				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			/* complete the command: dequeue, kill its timer, call done */
			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			/* advance; wrap and flip the toggle bit at the end of the ring */
			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
4273
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg: ioa config struct
 * @ipr_cmd: ipr command struct
 *
 * DMA-maps the scsi command's data buffer (scatter/gather or flat) and
 * fills in the IOADL descriptors. Short descriptor lists are placed
 * inline in the IOARCB's add_data area.
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i;
	struct scatterlist *sglist;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	length = scsi_cmd->request_bufflen;

	if (length == 0)
		return 0;	/* no data transfer for this command */

	if (scsi_cmd->use_sg) {
		/* scatter/gather path */
		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
						 scsi_cmd->request_buffer,
						 scsi_cmd->use_sg,
						 scsi_cmd->sc_data_direction);

		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		}

		sglist = scsi_cmd->request_buffer;

		/* if the list fits inline in the IOARCB, point the adapter there */
		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
			ioadl = ioarcb->add_data.u.ioadl;
			ioarcb->write_ioadl_addr =
				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
					    offsetof(struct ipr_ioarcb, add_data));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		}

		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
			ioadl[i].flags_and_data_len =
				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
			ioadl[i].address =
				cpu_to_be32(sg_dma_address(&sglist[i]));
		}

		if (likely(ipr_cmd->dma_use_sg)) {
			/* flag the final descriptor in the list */
			ioadl[i-1].flags_and_data_len |=
				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
	} else {
		/* single flat buffer path */
		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		}

		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
						     scsi_cmd->request_buffer, length,
						     scsi_cmd->sc_data_direction);

		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
			/* a single descriptor always fits inline in the IOARCB */
			ioadl = ioarcb->add_data.u.ioadl;
			ioarcb->write_ioadl_addr =
				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
					    offsetof(struct ipr_ioarcb, add_data));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ipr_cmd->dma_use_sg = 1;
			ioadl[0].flags_and_data_len =
				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
	}

	return -1;
}
4373
4374/**
4375 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
4376 * @scsi_cmd: scsi command struct
4377 *
4378 * Return value:
4379 * task attributes
4380 **/
4381static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
4382{
4383 u8 tag[2];
4384 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
4385
4386 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
4387 switch (tag[0]) {
4388 case MSG_SIMPLE_TAG:
4389 rc = IPR_FLAGS_LO_SIMPLE_TASK;
4390 break;
4391 case MSG_HEAD_TAG:
4392 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
4393 break;
4394 case MSG_ORDERED_TAG:
4395 rc = IPR_FLAGS_LO_ORDERED_TASK;
4396 break;
4397 };
4398 }
4399
4400 return rc;
4401}
4402
4403/**
4404 * ipr_erp_done - Process completion of ERP for a device
4405 * @ipr_cmd: ipr command struct
4406 *
4407 * This function copies the sense buffer into the scsi_cmd
4408 * struct and pushes the scsi_done function.
4409 *
4410 * Return value:
4411 * nothing
4412 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* A non-zero sense key here means the Request Sense we issued as
	 * part of ERP itself failed; report the op as a hard error.
	 * Otherwise copy the sense data gathered into the mid-layer's
	 * sense buffer. */
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		/* NACA-model devices do not get a SYNC_COMPLETE on the
		 * next command; see ipr_is_naca_model() */
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	/* Release DMA mappings, return the command block to the free list,
	 * and complete the op back to the SCSI mid-layer */
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
4438
4439/**
4440 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
4441 * @ipr_cmd: ipr command struct
4442 *
4443 * Return value:
4444 * none
4445 **/
4446static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4447{
Brian King51b1c7e2007-03-29 12:43:50 -05004448 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4449 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4450 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451
4452 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4453 ioarcb->write_data_transfer_length = 0;
4454 ioarcb->read_data_transfer_length = 0;
4455 ioarcb->write_ioadl_len = 0;
4456 ioarcb->read_ioadl_len = 0;
4457 ioasa->ioasc = 0;
4458 ioasa->residual_data_len = 0;
Brian King51b1c7e2007-03-29 12:43:50 -05004459 ioarcb->write_ioadl_addr =
4460 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4461 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462}
4463
4464/**
4465 * ipr_erp_request_sense - Send request sense to a device
4466 * @ipr_cmd: ipr command struct
4467 *
4468 * This function sends a request sense to a device as a result
4469 * of a check condition.
4470 *
4471 * Return value:
4472 * nothing
4473 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* If the previous ERP stage (cancel all) itself failed with a
	 * sense key, don't bother sensing; just finish ERP. */
	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	/* Build a REQUEST SENSE CDB; cdb[4] is the allocation length */
	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	/* Single IOADL entry pointing at the command block's pre-mapped
	 * DMA sense buffer */
	ipr_cmd->ioadl[0].flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
	ipr_cmd->ioadl[0].address =
		cpu_to_be32(ipr_cmd->sense_buffer_dma);

	ipr_cmd->ioarcb.read_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ipr_cmd->ioarcb.read_data_transfer_length =
		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);

	/* On completion (or timeout) continue the ERP chain in
	 * ipr_erp_done() */
	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
4506
4507/**
4508 * ipr_erp_cancel_all - Send cancel all to a device
4509 * @ipr_cmd: ipr command struct
4510 *
4511 * This function sends a cancel all to a device to clear the
4512 * queue. If we are running TCQ on the device, QERR is set to 1,
4513 * which means all outstanding ops have been dropped on the floor.
4514 * Cancel all will return them to us.
4515 *
4516 * Return value:
4517 * nothing
4518 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	/* Mark the device as being in error recovery until ERP finishes
	 * in ipr_erp_done() */
	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	/* Untagged devices have nothing queued to cancel; go straight to
	 * the request-sense stage */
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	/* Issue an IOA cancel-all to recover ops dropped due to QERR */
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	/* Continue the ERP chain with a request sense on completion */
	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
4541
4542/**
4543 * ipr_dump_ioasa - Dump contents of IOASA
4544 * @ioa_cfg: ioa config struct
4545 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06004546 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547 *
4548 * This function is invoked by the interrupt handler when ops
4549 * fail. It will log the IOASA if appropriate. Only called
4550 * for GPDD ops.
4551 *
4552 * Return value:
4553 * none
4554 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;

	/* Nothing to log on success or when logging is disabled */
	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	/* For bus resets, the failing-device IOASC identifies the real
	 * cause; prefer it when present */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		/* At normal log levels only generic SCSI devices are dumped */
		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	/* Clamp the dump length to the size of our IOASA structure */
	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	/* i indexes 32-bit words; four words per output line, with the
	 * byte offset (i*4) printed at the start of each line */
	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
4608
4609/**
4610 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
4613 *
4614 * Return value:
4615 * none
4616 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* Driver-generated IOASCs have no device sense data to synthesize */
	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	/* Volume sets with a failing LBA above 32 bits need descriptor
	 * format (0x72) sense data to carry the 64-bit LBA */
	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;	/* descriptor format, current errors */
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;	/* additional sense length */
		sense_buf[8] = 0;	/* information descriptor type */
		sense_buf[9] = 0x0A;	/* descriptor length */
		sense_buf[10] = 0x80;	/* information field valid */

		/* High 32 bits of the failing LBA, big-endian */
		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		/* Low 32 bits of the failing LBA */
		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		/* Fixed format (0x70) sense data */
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			/* Sense-key specific field pointer (bytes 16-17) */
			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
4699
4700/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06004701 * ipr_get_autosense - Copy autosense data to sense buffer
4702 * @ipr_cmd: ipr command struct
4703 *
4704 * This function copies the autosense buffer to the buffer
4705 * in the scsi_cmd, if there is autosense available.
4706 *
4707 * Return value:
4708 * 1 if autosense was available / 0 if not
4709 **/
4710static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4711{
4712 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4713
Brian King117d2ce2006-08-02 14:57:58 -05004714 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06004715 return 0;
4716
4717 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4718 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4719 SCSI_SENSE_BUFFERSIZE));
4720 return 1;
4721}
4722
4723/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724 * ipr_erp_start - Process an error response for a SCSI op
4725 * @ioa_cfg: ioa config struct
4726 * @ipr_cmd: ipr command struct
4727 *
4728 * This function determines whether or not to initiate ERP
4729 * on the affected device.
4730 *
4731 * Return value:
4732 * nothing
4733 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	/* Device went away underneath us; treat like EH completion */
	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	/* For non-GSCSI devices, synthesize sense data from the IOASC
	 * (except for raw device-bus status, which carries its own) */
	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		/* Pass the device's own SCSI status through; on a check
		 * condition without autosense, kick off full ERP
		 * (cancel all + request sense) and return early --
		 * completion happens later in ipr_erp_done() */
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	/* Unmap DMA, recycle the command block, and complete the op */
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
4816
4817/**
4818 * ipr_scsi_done - mid-layer done function
4819 * @ipr_cmd: ipr command struct
4820 *
4821 * This function is invoked by the interrupt handler for
4822 * ops generated by the SCSI mid-layer
4823 *
4824 * Return value:
4825 * none
4826 **/
4827static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4828{
4829 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4830 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4831 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4832
4833 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4834
4835 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4836 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4837 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4838 scsi_cmd->scsi_done(scsi_cmd);
4839 } else
4840 ipr_erp_start(ioa_cfg, ipr_cmd);
4841}
4842
4843/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004844 * ipr_queuecommand - Queue a mid-layer request
4845 * @scsi_cmd: scsi command struct
4846 * @done: done function
4847 *
4848 * This function queues a request generated by the mid-layer.
4849 *
4850 * Return value:
4851 * 0 on success
4852 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4853 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4854 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	/* SATA devices are routed through libata's SAS bridge instead */
	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	/* Extra request flags apply only to generic SCSI devices and
	 * volume sets */
	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		/* One-shot flag set by earlier error handling; consumed here */
		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	/* Vendor-specific opcodes (>= 0xC0) go to the IOA itself rather
	 * than the device, except on generic SCSI devices */
	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		/* Ensure all IOARCB writes are visible before ringing the
		 * adapter's doorbell register */
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		 return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
4934
4935/**
Brian King35a39692006-09-25 12:39:20 -05004936 * ipr_ioctl - IOCTL handler
4937 * @sdev: scsi device struct
4938 * @cmd: IOCTL cmd
4939 * @arg: IOCTL arg
4940 *
4941 * Return value:
4942 * 0 on success / other on failure
4943 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06004944static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05004945{
4946 struct ipr_resource_entry *res;
4947
4948 res = (struct ipr_resource_entry *)sdev->hostdata;
4949 if (res && ipr_is_gata(res))
4950 return ata_scsi_ioctl(sdev, cmd, arg);
4951
4952 return -EINVAL;
4953}
4954
4955/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
4958 *
4959 * Return value:
4960 * pointer to buffer with description string
4961 **/
4962static const char * ipr_ioa_info(struct Scsi_Host *host)
4963{
4964 static char buffer[512];
4965 struct ipr_ioa_cfg *ioa_cfg;
4966 unsigned long lock_flags = 0;
4967
4968 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4969
4970 spin_lock_irqsave(host->host_lock, lock_flags);
4971 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4972 spin_unlock_irqrestore(host->host_lock, lock_flags);
4973
4974 return buffer;
4975}
4976
/* SCSI mid-layer host template: entry points and queueing limits for
 * this driver. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
5004
Brian King35a39692006-09-25 12:39:20 -05005005/**
5006 * ipr_ata_phy_reset - libata phy_reset handler
5007 * @ap: ata port to reset
5008 *
5009 **/
5010static void ipr_ata_phy_reset(struct ata_port *ap)
5011{
5012 unsigned long flags;
5013 struct ipr_sata_port *sata_port = ap->private_data;
5014 struct ipr_resource_entry *res = sata_port->res;
5015 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5016 int rc;
5017
5018 ENTER;
5019 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5020 while(ioa_cfg->in_reset_reload) {
5021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5022 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5023 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5024 }
5025
5026 if (!ioa_cfg->allow_cmds)
5027 goto out_unlock;
5028
5029 rc = ipr_device_reset(ioa_cfg, res);
5030
5031 if (rc) {
5032 ap->ops->port_disable(ap);
5033 goto out_unlock;
5034 }
5035
5036 switch(res->cfgte.proto) {
5037 case IPR_PROTO_SATA:
5038 case IPR_PROTO_SAS_STP:
5039 ap->device[0].class = ATA_DEV_ATA;
5040 break;
5041 case IPR_PROTO_SATA_ATAPI:
5042 case IPR_PROTO_SAS_STP_ATAPI:
5043 ap->device[0].class = ATA_DEV_ATAPI;
5044 break;
5045 default:
5046 ap->device[0].class = ATA_DEV_UNKNOWN;
5047 ap->ops->port_disable(ap);
5048 break;
5049 };
5050
5051out_unlock:
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5053 LEAVE;
5054}
5055
5056/**
5057 * ipr_ata_post_internal - Cleanup after an internal command
5058 * @qc: ATA queued command
5059 *
5060 * Return value:
5061 * none
5062 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Wait out any adapter reset/reload; drop the lock while sleeping */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	/* If the internal command is still pending on the adapter, reset
	 * the device to flush it out */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
5085
5086/**
5087 * ipr_tf_read - Read the current ATA taskfile for the ATA port
5088 * @ap: ATA port
5089 * @tf: destination ATA taskfile
5090 *
5091 * Return value:
5092 * none
5093 **/
5094static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
5095{
5096 struct ipr_sata_port *sata_port = ap->private_data;
5097 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5098
5099 tf->feature = g->error;
5100 tf->nsect = g->nsect;
5101 tf->lbal = g->lbal;
5102 tf->lbam = g->lbam;
5103 tf->lbah = g->lbah;
5104 tf->device = g->device;
5105 tf->command = g->status;
5106 tf->hob_nsect = g->hob_nsect;
5107 tf->hob_lbal = g->hob_lbal;
5108 tf->hob_lbam = g->hob_lbam;
5109 tf->hob_lbah = g->hob_lbah;
5110 tf->ctl = g->alt_status;
5111}
5112
5113/**
5114 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5115 * @regs: destination
5116 * @tf: source ATA taskfile
5117 *
5118 * Return value:
5119 * none
5120 **/
5121static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5122 struct ata_taskfile *tf)
5123{
5124 regs->feature = tf->feature;
5125 regs->nsect = tf->nsect;
5126 regs->lbal = tf->lbal;
5127 regs->lbam = tf->lbam;
5128 regs->lbah = tf->lbah;
5129 regs->device = tf->device;
5130 regs->command = tf->command;
5131 regs->hob_feature = tf->hob_feature;
5132 regs->hob_nsect = tf->hob_nsect;
5133 regs->hob_lbal = tf->hob_lbal;
5134 regs->hob_lbam = tf->hob_lbam;
5135 regs->hob_lbah = tf->hob_lbah;
5136 regs->ctl = tf->ctl;
5137}
5138
5139/**
5140 * ipr_sata_done - done function for SATA commands
5141 * @ipr_cmd: ipr command struct
5142 *
5143 * This function is invoked by the interrupt handler for
5144 * ops generated by the SCSI mid-layer to SATA devices
5145 *
5146 * Return value:
5147 * none
5148 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* Latch the ATA status registers so later tf_read/check_status
	 * callbacks can report them */
	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
	       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
					 res->cfgte.res_addr.target);

	/* Sense key above RECOVERED_ERROR forces at least a device error
	 * into the qc error mask regardless of the ATA status byte */
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	else
		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
5172
5173/**
5174 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5175 * @ipr_cmd: ipr command struct
5176 * @qc: ATA queued command
5177 *
5178 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	/* Total transfer length includes libata's DMA pad, if any */
	int len = qc->nbytes + qc->pad_len;
	struct scatterlist *sg;

	if (len == 0)
		return;

	/* Set direction-specific flags and the IOADL length; dma_use_sg
	 * was filled in by the caller (ipr_qc_issue) */
	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->write_data_transfer_length = cpu_to_be32(len);
		ioarcb->write_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* Build one descriptor per scatter/gather element; the final
	 * element is tagged with the LAST flag */
	ata_for_each_sg(sg, qc) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));
		if (ata_sg_is_last(sg, qc))
			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
		else
			ioadl++;
	}
}
5213
5214/**
5215 * ipr_qc_issue - Issue a SATA qc to a device
5216 * @qc: queued command
5217 *
5218 * Return value:
5219 * 0 if success
5220 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	/* Reject new commands while the adapter is resetting or dead */
	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
		return AC_ERR_SYSTEM;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	regs = &ioarcb->add_data.u.regs;

	/* ATA register values travel in the IOARCB's additional data area */
	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	/* An extra S/G element is needed when libata added a DMA pad */
	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;

	ipr_build_ata_ioadl(ipr_cmd, qc);
	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	/* Translate the ATA protocol into IOA transfer-type flags */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATA_PROT_ATAPI_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	/* Ensure all IOARCB writes are visible before ringing the
	 * adapter's doorbell register */
	mb();
	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
	return 0;
}
5285
5286/**
5287 * ipr_ata_check_status - Return last ATA status
5288 * @ap: ATA port
5289 *
5290 * Return value:
5291 * ATA status
5292 **/
5293static u8 ipr_ata_check_status(struct ata_port *ap)
5294{
5295 struct ipr_sata_port *sata_port = ap->private_data;
5296 return sata_port->ioasa.status;
5297}
5298
5299/**
5300 * ipr_ata_check_altstatus - Return last ATA altstatus
5301 * @ap: ATA port
5302 *
5303 * Return value:
5304 * Alt ATA status
5305 **/
5306static u8 ipr_ata_check_altstatus(struct ata_port *ap)
5307{
5308 struct ipr_sata_port *sata_port = ap->private_data;
5309 return sata_port->ioasa.alt_status;
5310}
5311
/* libata port operations for SATA devices behind the IOA. The IOA
 * executes commands itself, so qc_prep is a no-op and status reads come
 * from the cached IOASA rather than real taskfile registers. */
static struct ata_port_operations ipr_sata_ops = {
	.port_disable = ata_port_disable,
	.check_status = ipr_ata_check_status,
	.check_altstatus = ipr_ata_check_altstatus,
	.dev_select = ata_noop_dev_select,
	.phy_reset = ipr_ata_phy_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.tf_read = ipr_tf_read,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ipr_qc_issue,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};
5325
/* Port capabilities advertised to libata for IOA-attached SATA devices */
static struct ata_port_info sata_port_info = {
	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask	= 0x10, /* pio4 */
	.mwdma_mask = 0x07,
	.udma_mask	= 0x7f, /* udma0-6 */
	.port_ops	= &ipr_sata_ops
};
5334
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335#ifdef CONFIG_PPC_PSERIES
/*
 * PVR values of processors on which early-revision 5702 (Gemstone)
 * adapters must not be used -- see ipr_invalid_adapter() below.
 */
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};
5346
5347/**
5348 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
5349 * @ioa_cfg: ioa cfg struct
5350 *
5351 * Adapters that use Gemstone revision < 3.1 do not work reliably on
5352 * certain pSeries hardware. This function determines if the given
5353 * adapter is in one of these confgurations or not.
5354 *
5355 * Return value:
5356 * 1 if adapter is not supported / 0 if adapter is supported
5357 **/
5358static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
5359{
5360 u8 rev_id;
5361 int i;
5362
5363 if (ioa_cfg->type == 0x5702) {
5364 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
5365 &rev_id) == PCIBIOS_SUCCESSFUL) {
5366 if (rev_id < 4) {
5367 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
5368 if (__is_processor(ipr_blocked_processors[i]))
5369 return 1;
5370 }
5371 }
5372 }
5373 }
5374 return 0;
5375}
5376#else
5377#define ipr_invalid_adapter(ioa_cfg) 0
5378#endif
5379
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Called with the host lock held (it is dropped and reacquired around
 * the midlayer unblock call).
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	/* Return the command block before waking sleepers so a new reset
	 * can allocate it */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* The host lock must not be held across the midlayer unblock call */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
5407
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Called with the host lock held (dropped and reacquired around the
 * midlayer unblock call).
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	/* Subsequent resets are runtime resets rather than initial bringup */
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	/* Trace (once) if any device needs a midlayer add/remove */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	/* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS free buffers are
	 * posted for error log data, the rest for config change events */
	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* Host lock must not be held across the midlayer unblock call */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* A new reset may have started while the lock was dropped */
	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
5465
5466/**
5467 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
5468 * @supported_dev: supported device struct
5469 * @vpids: vendor product id struct
5470 *
5471 * Return value:
5472 * none
5473 **/
5474static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5475 struct ipr_std_inq_vpids *vpids)
5476{
5477 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
5478 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
5479 supported_dev->num_records = 1;
5480 supported_dev->data_length =
5481 cpu_to_be16(sizeof(struct ipr_supported_device));
5482 supported_dev->reserved = 0;
5483}
5484
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter for the
 * next SCSI disk on the used resource queue. It re-registers itself as
 * the job step so it is re-entered once per device (ipr_cmd->u.res
 * carries the iteration cursor between invocations), and falls through
 * to ipr_ioa_reset_done when no disks remain.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	/* Default next step if no more disks need the command */
	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		/* Remember where to resume on the next invocation */
		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		/* cdb[7..8]: big-endian transfer length */
		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		/* Single-element write IOADL pointing at the shared VPD buffer */
		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		/* Re-enter this function for the next disk */
		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
5536
/**
 * ipr_setup_write_cache - Disable write cache if needed
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the adapter's write cache to the desired
 * setting. If the cache is not being disabled, nothing needs to be
 * sent and the job continues straight to ipr_set_supported_devs;
 * otherwise an IOA shutdown prepare-for-normal is issued, which
 * flushes and disables the cache.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_set_supported_devs;
	/* Prime the iteration cursor for ipr_set_supported_devs */
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	if (ioa_cfg->cache_state != CACHE_DISABLED)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
5566
5567/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568 * ipr_get_mode_page - Locate specified mode page
5569 * @mode_pages: mode page buffer
5570 * @page_code: page code to find
5571 * @len: minimum required length for mode page
5572 *
5573 * Return value:
5574 * pointer to mode page / NULL on failure
5575 **/
5576static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
5577 u32 page_code, u32 len)
5578{
5579 struct ipr_mode_page_hdr *mode_hdr;
5580 u32 page_length;
5581 u32 length;
5582
5583 if (!mode_pages || (mode_pages->hdr.length == 0))
5584 return NULL;
5585
5586 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
5587 mode_hdr = (struct ipr_mode_page_hdr *)
5588 (mode_pages->data + mode_pages->hdr.block_desc_len);
5589
5590 while (length) {
5591 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
5592 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
5593 return mode_hdr;
5594 break;
5595 } else {
5596 page_length = (sizeof(struct ipr_mode_page_hdr) +
5597 mode_hdr->page_length);
5598 length -= page_length;
5599 mode_hdr = (struct ipr_mode_page_hdr *)
5600 ((unsigned long)mode_hdr + page_length);
5601 }
5602 }
5603 return NULL;
5604}
5605
5606/**
5607 * ipr_check_term_power - Check for term power errors
5608 * @ioa_cfg: ioa config struct
5609 * @mode_pages: IOAFP mode pages buffer
5610 *
5611 * Check the IOAFP's mode page 28 for term power errors
5612 *
5613 * Return value:
5614 * nothing
5615 **/
5616static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
5617 struct ipr_mode_pages *mode_pages)
5618{
5619 int i;
5620 int entry_length;
5621 struct ipr_dev_bus_entry *bus;
5622 struct ipr_mode_page28 *mode_page;
5623
5624 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5625 sizeof(struct ipr_mode_page28));
5626
5627 entry_length = mode_page->entry_length;
5628
5629 bus = mode_page->bus;
5630
5631 for (i = 0; i < mode_page->num_entries; i++) {
5632 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
5633 dev_err(&ioa_cfg->pdev->dev,
5634 "Term power is absent on scsi bus %d\n",
5635 bus->res_addr.bus);
5636 }
5637
5638 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
5639 }
5640}
5641
5642/**
5643 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
5644 * @ioa_cfg: ioa config struct
5645 *
5646 * Looks through the config table checking for SES devices. If
5647 * the SES device is in the SES table indicating a maximum SCSI
5648 * bus speed, the speed is limited for the bus.
5649 *
5650 * Return value:
5651 * none
5652 **/
5653static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
5654{
5655 u32 max_xfer_rate;
5656 int i;
5657
5658 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5659 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
5660 ioa_cfg->bus_attr[i].bus_width);
5661
5662 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
5663 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
5664 }
5665}
5666
5667/**
5668 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
5669 * @ioa_cfg: ioa config struct
5670 * @mode_pages: mode page 28 buffer
5671 *
5672 * Updates mode page 28 based on driver configuration
5673 *
5674 * Return value:
5675 * none
5676 **/
5677static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5678 struct ipr_mode_pages *mode_pages)
5679{
5680 int i, entry_length;
5681 struct ipr_dev_bus_entry *bus;
5682 struct ipr_bus_attributes *bus_attr;
5683 struct ipr_mode_page28 *mode_page;
5684
5685 mode_page = ipr_get_mode_page(mode_pages, 0x28,
5686 sizeof(struct ipr_mode_page28));
5687
5688 entry_length = mode_page->entry_length;
5689
5690 /* Loop for each device bus entry */
5691 for (i = 0, bus = mode_page->bus;
5692 i < mode_page->num_entries;
5693 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
5694 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
5695 dev_err(&ioa_cfg->pdev->dev,
5696 "Invalid resource address reported: 0x%08X\n",
5697 IPR_GET_PHYS_LOC(bus->res_addr));
5698 continue;
5699 }
5700
5701 bus_attr = &ioa_cfg->bus_attr[i];
5702 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
5703 bus->bus_width = bus_attr->bus_width;
5704 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
5705 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
5706 if (bus_attr->qas_enabled)
5707 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
5708 else
5709 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
5710 }
5711}
5712
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 1 (PF/SP flags) of the Mode Select CDB
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Fills in the IOARCB and a single-element write IOADL for a
 * MODE SELECT to the given resource. The caller issues the request.
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm, u32 dma_addr,
				  u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
}
5744
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them. It operates on the
 * page 28 data previously fetched into the shared VPD buffer by
 * ipr_ioafp_mode_sense_page28.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	/* Mode-sense length excludes the length byte itself; the mode data
	 * length field must be zero (reserved) on a MODE SELECT */
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	/* 0x11: PF | SP flags in CDB byte 1 */
	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_setup_write_cache;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
5778
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 (page code) of the Mode Sense CDB
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Fills in the IOARCB and a single-element read IOADL for a
 * MODE SENSE to the given resource. The caller issues the request.
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
}
5809
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command: it logs
 * the failing op code and IOASC, starts a fresh adapter reset, and
 * returns the command block to the free queue.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}
5832
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages: an "invalid request"
 * IOASC just skips ahead to the write-cache setup step; any other
 * failure is treated as a reset command failure.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_setup_write_cache;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
5854
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes into the shared VPD buffer.
 * On success the job proceeds to ipr_ioafp_mode_select_page28;
 * failures go through ipr_reset_mode_sense_failed.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
5883
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table just fetched from the adapter. Devices
 * present in both are kept; new devices are pulled off the free queue
 * and flagged for midlayer add; devices no longer reported are flagged
 * for midlayer removal (if they had an sdev) or returned to the free
 * queue.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;
	int found, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	/* Park all currently-known resources; survivors move back below */
	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];
		found = 0;

		/* Match by resource address against previously-known devices */
		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			/* New device: allocate an entry and flag for midlayer add */
			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res);
			res->add_to_ml = 1;
		}

		/* Refresh the cached config table entry in either case */
		if (found)
			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
	}

	/* Anything left in old_res is no longer reported by the adapter */
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	}

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
5957
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table into
 * ioa_cfg->cfg_table. It also logs the firmware version obtained by
 * the earlier page 3 inquiry.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	/* cdb[7..8]: big-endian transfer length */
	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6001
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	Byte 1 of the Inquiry CDB (EVPD flag)
 * @page:	VPD page code (Byte 2 of the CDB)
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	size of the response buffer
 *
 * This utility function sends an inquiry to the adapter and issues
 * the request as part of the reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
6036
6037/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006038 * ipr_inquiry_page_supported - Is the given inquiry page supported
6039 * @page0: inquiry page 0 buffer
6040 * @page: page code.
6041 *
6042 * This function determines if the specified inquiry page is supported.
6043 *
6044 * Return value:
6045 * 1 if page is supported / 0 if not
6046 **/
6047static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6048{
6049 int i;
6050
6051 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6052 if (page0->page[i] == page)
6053 return 1;
6054
6055 return 0;
6056}
6057
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information. It also uses the page 0
 * results fetched by the previous step to decide whether the adapter
 * has a write cache at all (page 1 unsupported => no cache).
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;

	ENTER;

	if (!ipr_inquiry_page_supported(page0, 1))
		ioa_cfg->cache_state = CACHE_NONE;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6087
6088/**
6089 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6090 * @ipr_cmd: ipr command struct
6091 *
6092 * This function sends a Page 0 inquiry to the adapter
6093 * to retrieve supported inquiry pages.
6094 *
6095 * Return value:
6096 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6097 **/
6098static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6099{
6100 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006101 char type[5];
6102
6103 ENTER;
6104
6105 /* Grab the type out of the VPD and store it away */
6106 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6107 type[4] = '\0';
6108 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6109
brking@us.ibm.com62275042005-11-01 17:01:14 -06006110 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111
brking@us.ibm.com62275042005-11-01 17:01:14 -06006112 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6113 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6114 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115
6116 LEAVE;
6117 return IPR_RC_JOB_RETURN;
6118}
6119
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter, reading the
 * result into the shared VPD buffer, then proceeds to the page 0
 * inquiry step.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6143
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter, passing the queue's
 * DMA address and size in the CDB. (The "indentify" misspelling is
 * long-standing; the name is referenced elsewhere in this file.)
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	/* cdb[2..5]: HRRQ DMA address, most significant byte first */
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	/* cdb[7..8]: HRRQ size in bytes, big-endian */
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6186
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Runs in timer context; takes the host lock itself.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Only advance the job if this command still owns the reset */
	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
6214
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value (in jiffies)
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * The command is queued on pending_q and its done handler is set so
 * that when ipr_reset_timer_done fires, the reset job resumes.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
6240
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Resets the host RRQ (the DMA ring the adapter posts completions to)
 * and the host-side pointers into it, and clears the config table
 * buffer ahead of re-fetching it from the adapter.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	/* Toggle bit distinguishes new entries after the ring wraps */
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}
6261
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter, then waits (via a
 * timer) for the adapter's transition to operational, which arrives
 * as an interrupt that resumes the reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* Already operational: unmask error/HRRQ interrupts and continue
	 * straight to the identify-HRRQ step */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		/* Read back to flush the posted MMIO write */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* Time out the wait for the operational transition */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6309
6310/**
6311 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
6312 * @ipr_cmd: ipr command struct
6313 *
6314 * This function is invoked when an adapter dump has run out
6315 * of processing time.
6316 *
6317 * Return value:
6318 * IPR_RC_JOB_CONTINUE
6319 **/
6320static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
6321{
6322 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6323
6324 if (ioa_cfg->sdt_state == GET_DUMP)
6325 ioa_cfg->sdt_state = ABORT_DUMP;
6326
6327 ipr_cmd->job_step = ipr_reset_alert;
6328
6329 return IPR_RC_JOB_CONTINUE;
6330}
6331
6332/**
6333 * ipr_unit_check_no_data - Log a unit check/no data error log
6334 * @ioa_cfg: ioa config struct
6335 *
6336 * Logs an error indicating the adapter unit checked, but for some
6337 * reason, we were unable to fetch the unit check buffer.
6338 *
6339 * Return value:
6340 * nothing
6341 **/
6342static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
6343{
6344 ioa_cfg->errors_logged++;
6345 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
6346}
6347
6348/**
6349 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
6350 * @ioa_cfg: ioa config struct
6351 *
6352 * Fetches the unit check buffer from the adapter by clocking the data
6353 * through the mailbox register.
6354 *
6355 * Return value:
6356 * nothing
6357 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* The mailbox must point at a format 2 SDT, otherwise there is
	 * no unit check data to fetch */
	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Clock the SDT header/table out of the adapter via the mailbox */
	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	/* The table must be ready and its first entry valid */
	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a host RCB from the free queue to hold the buffer */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	/* Fetch the UC buffer itself, capped at the size of the RCB's
	 * hcam area */
	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
		/* If the error requires another reset and a dump was
		 * requested, defer the dump until after that reset */
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	/* Return the borrowed RCB */
	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
6408
6409/**
6410 * ipr_reset_restore_cfg_space - Restore PCI config space.
6411 * @ipr_cmd: ipr command struct
6412 *
6413 * Description: This function restores the saved PCI config space of
6414 * the adapter, fails all outstanding ops back to the callers, and
6415 * fetches the dump/unit check if applicable to this reset.
6416 *
6417 * Return value:
6418 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6419 **/
6420static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6421{
6422 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6423 int rc;
6424
6425 ENTER;
6426 rc = pci_restore_state(ioa_cfg->pdev);
6427
6428 if (rc != PCIBIOS_SUCCESSFUL) {
6429 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6430 return IPR_RC_JOB_CONTINUE;
6431 }
6432
6433 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
6434 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6435 return IPR_RC_JOB_CONTINUE;
6436 }
6437
6438 ipr_fail_all_ops(ioa_cfg);
6439
6440 if (ioa_cfg->ioa_unit_checked) {
6441 ioa_cfg->ioa_unit_checked = 0;
6442 ipr_get_unit_check_buffer(ioa_cfg);
6443 ipr_cmd->job_step = ipr_reset_alert;
6444 ipr_reset_start_timer(ipr_cmd, 0);
6445 return IPR_RC_JOB_RETURN;
6446 }
6447
6448 if (ioa_cfg->in_ioa_bringdown) {
6449 ipr_cmd->job_step = ipr_ioa_bringdown_done;
6450 } else {
6451 ipr_cmd->job_step = ipr_reset_enable_ioa;
6452
6453 if (GET_DUMP == ioa_cfg->sdt_state) {
6454 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
6455 ipr_cmd->job_step = ipr_reset_wait_for_dump;
6456 schedule_work(&ioa_cfg->work_q);
6457 return IPR_RC_JOB_RETURN;
6458 }
6459 }
6460
6461 ENTER;
6462 return IPR_RC_JOB_CONTINUE;
6463}
6464
6465/**
Brian Kinge619e1a2007-01-23 11:25:37 -06006466 * ipr_reset_bist_done - BIST has completed on the adapter.
6467 * @ipr_cmd: ipr command struct
6468 *
6469 * Description: Unblock config space and resume the reset process.
6470 *
6471 * Return value:
6472 * IPR_RC_JOB_CONTINUE
6473 **/
6474static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
6475{
6476 ENTER;
6477 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6478 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
6479 LEAVE;
6480 return IPR_RC_JOB_CONTINUE;
6481}
6482
6483/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006484 * ipr_reset_start_bist - Run BIST on the adapter.
6485 * @ipr_cmd: ipr command struct
6486 *
6487 * Description: This function runs BIST on the adapter, then delays 2 seconds.
6488 *
6489 * Return value:
6490 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6491 **/
6492static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6493{
6494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6495 int rc;
6496
6497 ENTER;
Brian Kingb30197d2005-09-27 01:21:56 -07006498 pci_block_user_cfg_access(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006499 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6500
6501 if (rc != PCIBIOS_SUCCESSFUL) {
Brian Kinga9aedb02007-03-29 12:43:23 -05006502 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6504 rc = IPR_RC_JOB_CONTINUE;
6505 } else {
Brian Kinge619e1a2007-01-23 11:25:37 -06006506 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
6508 rc = IPR_RC_JOB_RETURN;
6509 }
6510
6511 LEAVE;
6512 return rc;
6513}
6514
6515/**
6516 * ipr_reset_allowed - Query whether or not IOA can be reset
6517 * @ioa_cfg: ioa config struct
6518 *
6519 * Return value:
6520 * 0 if reset not allowed / non-zero if reset is allowed
6521 **/
6522static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
6523{
6524 volatile u32 temp_reg;
6525
6526 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
6527 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
6528}
6529
6530/**
6531 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
6532 * @ipr_cmd: ipr command struct
6533 *
6534 * Description: This function waits for adapter permission to run BIST,
6535 * then runs BIST. If the adapter does not give permission after a
6536 * reasonable time, we will reset the adapter anyway. The impact of
6537 * resetting the adapter without warning the adapter is the risk of
6538 * losing the persistent error log on the adapter. If the adapter is
6539 * reset while it is writing to the flash on the adapter, the flash
6540 * segment will have bad ECC and be zeroed.
6541 *
6542 * Return value:
6543 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6544 **/
6545static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
6546{
6547 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6548 int rc = IPR_RC_JOB_RETURN;
6549
6550 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
6551 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
6552 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
6553 } else {
6554 ipr_cmd->job_step = ipr_reset_start_bist;
6555 rc = IPR_RC_JOB_CONTINUE;
6556 }
6557
6558 return rc;
6559}
6560
6561/**
6562 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
6563 * @ipr_cmd: ipr command struct
6564 *
6565 * Description: This function alerts the adapter that it will be reset.
6566 * If memory space is not currently enabled, proceed directly
6567 * to running BIST on the adapter. The timer must always be started
6568 * so we guarantee we do not run BIST from ipr_isr.
6569 *
6570 * Return value:
6571 * IPR_RC_JOB_RETURN
6572 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	/* Only touch MMIO if memory space is currently enabled */
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* Quiesce interrupts, then raise the reset-alert doorbell so
		 * the adapter can prepare (e.g. finish flash writes) */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		/* Cannot alert the adapter: go straight to BIST */
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	/* Always go through the timer so BIST never runs from ipr_isr */
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6596
6597/**
6598 * ipr_reset_ucode_download_done - Microcode download completion
6599 * @ipr_cmd: ipr command struct
6600 *
6601 * Description: This function unmaps the microcode download buffer.
6602 *
6603 * Return value:
6604 * IPR_RC_JOB_CONTINUE
6605 **/
6606static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
6607{
6608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6609 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6610
6611 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
6612 sglist->num_sg, DMA_TO_DEVICE);
6613
6614 ipr_cmd->job_step = ipr_reset_alert;
6615 return IPR_RC_JOB_CONTINUE;
6616}
6617
6618/**
6619 * ipr_reset_ucode_download - Download microcode to the adapter
6620 * @ipr_cmd: ipr command struct
6621 *
6622 * Description: This function checks to see if it there is microcode
6623 * to download to the adapter. If there is, a download is performed.
6624 *
6625 * Return value:
6626 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6627 **/
6628static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6629{
6630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6631 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
6632
6633 ENTER;
6634 ipr_cmd->job_step = ipr_reset_alert;
6635
6636 if (!sglist)
6637 return IPR_RC_JOB_CONTINUE;
6638
6639 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6640 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6641 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
6642 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
6643 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
6644 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6645 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6646
brking@us.ibm.com12baa422005-11-01 17:01:27 -06006647 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6649
6650 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6651 IPR_WRITE_BUFFER_TIMEOUT);
6652
6653 LEAVE;
6654 return IPR_RC_JOB_RETURN;
6655}
6656
6657/**
6658 * ipr_reset_shutdown_ioa - Shutdown the adapter
6659 * @ipr_cmd: ipr command struct
6660 *
6661 * Description: This function issues an adapter shutdown of the
6662 * specified type to the specified adapter as part of the
6663 * adapter reset job.
6664 *
6665 * Return value:
6666 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6667 **/
6668static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
6669{
6670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6671 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
6672 unsigned long timeout;
6673 int rc = IPR_RC_JOB_CONTINUE;
6674
6675 ENTER;
6676 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
6677 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6678 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6679 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
6680 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
6681
6682 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
6683 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
6684 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
6685 timeout = IPR_INTERNAL_TIMEOUT;
6686 else
6687 timeout = IPR_SHUTDOWN_TIMEOUT;
6688
6689 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
6690
6691 rc = IPR_RC_JOB_RETURN;
6692 ipr_cmd->job_step = ipr_reset_ucode_download;
6693 } else
6694 ipr_cmd->job_step = ipr_reset_alert;
6695
6696 LEAVE;
6697 return rc;
6698}
6699
6700/**
6701 * ipr_reset_ioa_job - Adapter reset job
6702 * @ipr_cmd: ipr command struct
6703 *
6704 * Description: This function is the job router for the adapter reset job.
6705 *
6706 * Return value:
6707 * none
6708 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Run job steps back-to-back while they return JOB_CONTINUE; a
	 * step that returns JOB_RETURN has deferred completion (timer or
	 * command completion will re-enter this router) */
	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			/* Previous step failed: let its failure handler
			 * decide whether the job can proceed */
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		/* Reuse the same command block for the next step, with the
		 * default failure handler reinstated */
		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while(rc == IPR_RC_JOB_CONTINUE);
}
6737
6738/**
6739 * _ipr_initiate_ioa_reset - Initiate an adapter reset
6740 * @ioa_cfg: ioa config struct
6741 * @job_step: first job step of reset job
6742 * @shutdown_type: shutdown type
6743 *
6744 * Description: This function will initiate the reset of the given adapter
6745 * starting at the selected job step.
6746 * If the caller needs to wait on the completion of the reset,
6747 * the caller must sleep on the reset_wait_q.
6748 *
6749 * Return value:
6750 * none
6751 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	/* Stop accepting new commands before the reset job starts */
	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	/* Dedicate a command block to the reset; reset_cmd also marks it
	 * as the current (non-nested) reset job */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
6769
6770/**
6771 * ipr_initiate_ioa_reset - Initiate an adapter reset
6772 * @ioa_cfg: ioa config struct
6773 * @shutdown_type: shutdown type
6774 *
6775 * Description: This function will initiate the reset of the given adapter.
6776 * If the caller needs to wait on the completion of the reset,
6777 * the caller must sleep on the reset_wait_q.
6778 *
6779 * Return value:
6780 * none
6781 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	/* A dead adapter cannot be reset */
	if (ioa_cfg->ioa_is_dead)
		return;

	/* A nested reset aborts any dump still being collected */
	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			/* Already in bringdown: finish the teardown here
			 * rather than starting another reset job */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			/* Drop the host lock around unblocking the midlayer,
			 * since it may submit new requests immediately.
			 * Caller is expected to hold host_lock (irqs off). */
			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			/* Retries exhausted: switch to a bringdown and skip
			 * the shutdown command (adapter is unresponsive) */
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
6817
6818/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06006819 * ipr_reset_freeze - Hold off all I/O activity
6820 * @ipr_cmd: ipr command struct
6821 *
6822 * Description: If the PCI slot is frozen, hold off all I/O
6823 * activity; then, as soon as the slot is available again,
6824 * initiate an adapter reset.
6825 */
6826static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
6827{
6828 /* Disallow new interrupts, avoid loop */
6829 ipr_cmd->ioa_cfg->allow_interrupts = 0;
6830 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6831 ipr_cmd->done = ipr_reset_ioa_job;
6832 return IPR_RC_JOB_RETURN;
6833}
6834
6835/**
6836 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
6837 * @pdev: PCI device struct
6838 *
6839 * Description: This routine is called to tell us that the PCI bus
6840 * is down. Can't do anything here, except put the device driver
6841 * into a holding pattern, waiting for the PCI bus to come back.
6842 */
6843static void ipr_pci_frozen(struct pci_dev *pdev)
6844{
6845 unsigned long flags = 0;
6846 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6847
6848 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6849 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
6850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6851}
6852
6853/**
6854 * ipr_pci_slot_reset - Called when PCI slot has been reset.
6855 * @pdev: PCI device struct
6856 *
6857 * Description: This routine is called by the pci error recovery
6858 * code after the PCI slot has been reset, just before we
6859 * should resume normal operations.
6860 */
6861static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
6862{
6863 unsigned long flags = 0;
6864 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6865
6866 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6867 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
6868 IPR_SHUTDOWN_NONE);
6869 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6870 return PCI_ERS_RESULT_RECOVERED;
6871}
6872
6873/**
6874 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
6875 * @pdev: PCI device struct
6876 *
6877 * Description: This routine is called when the PCI bus has
6878 * permanently failed.
6879 */
6880static void ipr_pci_perm_failure(struct pci_dev *pdev)
6881{
6882 unsigned long flags = 0;
6883 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6884
6885 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6886 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6887 ioa_cfg->sdt_state = ABORT_DUMP;
6888 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
6889 ioa_cfg->in_ioa_bringdown = 1;
6890 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6892}
6893
6894/**
6895 * ipr_pci_error_detected - Called when a PCI error is detected.
6896 * @pdev: PCI device struct
6897 * @state: PCI channel state
6898 *
6899 * Description: Called when a PCI error is detected.
6900 *
6901 * Return value:
6902 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6903 */
6904static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
6905 pci_channel_state_t state)
6906{
6907 switch (state) {
6908 case pci_channel_io_frozen:
6909 ipr_pci_frozen(pdev);
6910 return PCI_ERS_RESULT_NEED_RESET;
6911 case pci_channel_io_perm_failure:
6912 ipr_pci_perm_failure(pdev);
6913 return PCI_ERS_RESULT_DISCONNECT;
6914 break;
6915 default:
6916 break;
6917 }
6918 return PCI_ERS_RESULT_NEED_RESET;
6919}
6920
6921/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006922 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
6923 * @ioa_cfg: ioa cfg struct
6924 *
 * Description: This is the second phase of adapter initialization
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
6931 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	/* A hard reset goes through the full shutdown/reset path;
	 * otherwise the adapter only needs to be enabled */
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	/* Drop the lock and sleep until the reset job completes */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		/* In test mode an unsupported adapter is tolerated */
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
6966
6967/**
6968 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6969 * @ioa_cfg: ioa config struct
6970 *
6971 * Return value:
6972 * none
6973 **/
6974static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6975{
6976 int i;
6977
6978 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6979 if (ioa_cfg->ipr_cmnd_list[i])
6980 pci_pool_free(ioa_cfg->ipr_cmd_pool,
6981 ioa_cfg->ipr_cmnd_list[i],
6982 ioa_cfg->ipr_cmnd_list_dma[i]);
6983
6984 ioa_cfg->ipr_cmnd_list[i] = NULL;
6985 }
6986
6987 if (ioa_cfg->ipr_cmd_pool)
6988 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6989
6990 ioa_cfg->ipr_cmd_pool = NULL;
6991}
6992
6993/**
6994 * ipr_free_mem - Frees memory allocated for an adapter
6995 * @ioa_cfg: ioa cfg struct
6996 *
6997 * Return value:
6998 * nothing
6999 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	/* Free every host RCB used for HCAMs */
	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
7024
7025/**
7026 * ipr_free_all_resources - Free all allocated resources for an adapter.
7027 * @ipr_cmd: ipr command struct
7028 *
7029 * This function frees all allocated resources for the
7030 * specified adapter.
7031 *
7032 * Return value:
7033 * none
7034 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	/* Tear down in reverse order of acquisition: release the IRQ
	 * first so no interrupt handler can run against freed state */
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	/* ioa_cfg is embedded in the Scsi_Host, so this put may free it;
	 * do not touch ioa_cfg afterwards */
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
7048
7049/**
7050 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7051 * @ioa_cfg: ioa config struct
7052 *
7053 * Return value:
7054 * 0 on success / -ENOMEM on allocation failure
7055 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	/* 8-byte-aligned DMA pool that backs all command blocks */
	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
						 sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			/* Releases the blocks allocated so far and the pool */
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		/* Wire the IOARCB's embedded DMA addresses to the matching
		 * pieces of this command block (IOADL, IOASA, sense buffer) */
		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
7100
7101/**
7102 * ipr_alloc_mem - Allocate memory for an adapter
7103 * @ioa_cfg: ioa config struct
7104 *
7105 * Return value:
7106 * 0 on success / non-zero for error
7107 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	/* Resource table entries for every possible attached device */
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	/* DMA-coherent buffer for VPD and misc control blocks */
	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	/* Host RRQ shared with the adapter for command responses */
	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	/* One host RCB per concurrent HCAM */
	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

	/* Unwind in reverse allocation order; on entry to the first label,
	 * i holds the count of successfully allocated host RCBs */
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
7192
7193/**
7194 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
7195 * @ioa_cfg: ioa config struct
7196 *
7197 * Return value:
7198 * none
7199 **/
7200static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
7201{
7202 int i;
7203
7204 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7205 ioa_cfg->bus_attr[i].bus = i;
7206 ioa_cfg->bus_attr[i].qas_enabled = 0;
7207 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
7208 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
7209 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
7210 else
7211 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
7212 }
7213}
7214
7215/**
7216 * ipr_init_ioa_cfg - Initialize IOA config struct
7217 * @ioa_cfg: ioa config struct
7218 * @host: scsi host struct
7219 * @pdev: PCI dev struct
7220 *
7221 * Return value:
7222 * none
7223 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	/* Eye-catcher labels make the control blocks easy to locate in
	 * a memory dump */
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	/* Write-cache policy comes from the ipr_enable_cache module param */
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	/* Translate the chip's register offsets into mapped MMIO addresses */
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
7281
7282/**
7283 * ipr_get_chip_cfg - Find adapter chip configuration
7284 * @dev_id: PCI device id struct
7285 *
7286 * Return value:
7287 * ptr to chip config on success / NULL on failure
7288 **/
7289static const struct ipr_chip_cfg_t * __devinit
7290ipr_get_chip_cfg(const struct pci_device_id *dev_id)
7291{
7292 int i;
7293
Linus Torvalds1da177e2005-04-16 15:20:36 -07007294 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
7295 if (ipr_chip[i].vendor == dev_id->vendor &&
7296 ipr_chip[i].device == dev_id->device)
7297 return ipr_chip[i].cfg;
7298 return NULL;
7299}
7300
7301/**
7302 * ipr_probe_ioa - Allocates memory and does first stage of initialization
7303 * @pdev: PCI device struct
7304 * @dev_id: PCI device id struct
7305 *
7306 * Return value:
7307 * 0 on success / non-zero on failure
7308 **/
7309static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7310 const struct pci_device_id *dev_id)
7311{
7312 struct ipr_ioa_cfg *ioa_cfg;
7313 struct Scsi_Host *host;
7314 unsigned long ipr_regs_pci;
7315 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07007316 int rc = PCIBIOS_SUCCESSFUL;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06007317 volatile u32 mask, uproc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318
7319 ENTER;
7320
7321 if ((rc = pci_enable_device(pdev))) {
7322 dev_err(&pdev->dev, "Cannot enable adapter\n");
7323 goto out;
7324 }
7325
7326 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
7327
7328 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
7329
7330 if (!host) {
7331 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
7332 rc = -ENOMEM;
7333 goto out_disable;
7334 }
7335
7336 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7337 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Brian King35a39692006-09-25 12:39:20 -05007338 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7339 sata_port_info.flags, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007340
7341 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
7342
7343 if (!ioa_cfg->chip_cfg) {
7344 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
7345 dev_id->vendor, dev_id->device);
7346 goto out_scsi_host_put;
7347 }
7348
Brian King5469cb52007-03-29 12:42:40 -05007349 if (ipr_transop_timeout)
7350 ioa_cfg->transop_timeout = ipr_transop_timeout;
7351 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7352 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7353 else
7354 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7355
Linus Torvalds1da177e2005-04-16 15:20:36 -07007356 ipr_regs_pci = pci_resource_start(pdev, 0);
7357
7358 rc = pci_request_regions(pdev, IPR_NAME);
7359 if (rc < 0) {
7360 dev_err(&pdev->dev,
7361 "Couldn't register memory range of registers\n");
7362 goto out_scsi_host_put;
7363 }
7364
7365 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
7366
7367 if (!ipr_regs) {
7368 dev_err(&pdev->dev,
7369 "Couldn't map memory range of registers\n");
7370 rc = -ENOMEM;
7371 goto out_release_regions;
7372 }
7373
7374 ioa_cfg->hdw_dma_regs = ipr_regs;
7375 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
7376 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
7377
7378 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
7379
7380 pci_set_master(pdev);
7381
7382 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7383 if (rc < 0) {
7384 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7385 goto cleanup_nomem;
7386 }
7387
7388 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
7389 ioa_cfg->chip_cfg->cache_line_size);
7390
7391 if (rc != PCIBIOS_SUCCESSFUL) {
7392 dev_err(&pdev->dev, "Write of cache line size failed\n");
7393 rc = -EIO;
7394 goto cleanup_nomem;
7395 }
7396
7397 /* Save away PCI config space for use following IOA reset */
7398 rc = pci_save_state(pdev);
7399
7400 if (rc != PCIBIOS_SUCCESSFUL) {
7401 dev_err(&pdev->dev, "Failed to save PCI config space\n");
7402 rc = -EIO;
7403 goto cleanup_nomem;
7404 }
7405
7406 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
7407 goto cleanup_nomem;
7408
7409 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7410 goto cleanup_nomem;
7411
7412 rc = ipr_alloc_mem(ioa_cfg);
7413 if (rc < 0) {
7414 dev_err(&pdev->dev,
7415 "Couldn't allocate enough memory for device driver!\n");
7416 goto cleanup_nomem;
7417 }
7418
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06007419 /*
7420 * If HRRQ updated interrupt is not masked, or reset alert is set,
7421 * the card is in an unknown state and needs a hard reset
7422 */
7423 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7424 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
7425 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7426 ioa_cfg->needs_hard_reset = 1;
7427
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Thomas Gleixner1d6f3592006-07-01 19:29:42 -07007429 rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007430
7431 if (rc) {
7432 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
7433 pdev->irq, rc);
7434 goto cleanup_nolog;
7435 }
7436
7437 spin_lock(&ipr_driver_lock);
7438 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
7439 spin_unlock(&ipr_driver_lock);
7440
7441 LEAVE;
7442out:
7443 return rc;
7444
7445cleanup_nolog:
7446 ipr_free_mem(ioa_cfg);
7447cleanup_nomem:
7448 iounmap(ipr_regs);
7449out_release_regions:
7450 pci_release_regions(pdev);
7451out_scsi_host_put:
7452 scsi_host_put(host);
7453out_disable:
7454 pci_disable_device(pdev);
7455 goto out;
7456}
7457
7458/**
7459 * ipr_scan_vsets - Scans for VSET devices
7460 * @ioa_cfg: ioa config struct
7461 *
7462 * Description: Since the VSET resources do not follow SAM in that we can have
7463 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
7464 *
7465 * Return value:
7466 * none
7467 **/
7468static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
7469{
7470 int target, lun;
7471
7472 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
7473 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
7474 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
7475}
7476
7477/**
7478 * ipr_initiate_ioa_bringdown - Bring down an adapter
7479 * @ioa_cfg: ioa config struct
7480 * @shutdown_type: shutdown type
7481 *
7482 * Description: This function will initiate bringing down the adapter.
7483 * This consists of issuing an IOA shutdown to the adapter
7484 * to flush the cache, and running BIST.
7485 * If the caller needs to wait on the completion of the reset,
7486 * the caller must sleep on the reset_wait_q.
7487 *
7488 * Return value:
7489 * none
7490 **/
7491static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
7492 enum ipr_shutdown_type shutdown_type)
7493{
7494 ENTER;
7495 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7496 ioa_cfg->sdt_state = ABORT_DUMP;
7497 ioa_cfg->reset_retries = 0;
7498 ioa_cfg->in_ioa_bringdown = 1;
7499 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
7500 LEAVE;
7501}
7502
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point. Waits for any in-flight
 * reset/reload, brings the adapter down, unlinks it from the driver's
 * global list, and frees all its resources.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	/* Let any reset/reload already in progress finish before starting
	 * the bringdown; the host lock must be dropped while sleeping. */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	/* Wait for the bringdown to complete and the worker thread to go
	 * idle before tearing down any resources. */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	/* Unlink from the global adapter list under the driver lock
	 * (nested inside the host lock, matching ipr_probe_ioa). */
	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	/* NOTE(review): an aborted dump appears to be re-armed here so a
	 * later dump can still be taken — confirm against the sdt_state
	 * state machine. */
	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
7544
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point. Removes the sysfs trace and dump
 * attributes and the SCSI host first, then performs the common adapter
 * bringdown and teardown in __ipr_remove().
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	/* Detach user-visible interfaces before bringing the adapter down. */
	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
7570
7571/**
7572 * ipr_probe - Adapter hot plug add entry point
7573 *
7574 * Return value:
7575 * 0 on success / non-zero on failure
7576 **/
7577static int __devinit ipr_probe(struct pci_dev *pdev,
7578 const struct pci_device_id *dev_id)
7579{
7580 struct ipr_ioa_cfg *ioa_cfg;
7581 int rc;
7582
7583 rc = ipr_probe_ioa(pdev, dev_id);
7584
7585 if (rc)
7586 return rc;
7587
7588 ioa_cfg = pci_get_drvdata(pdev);
7589 rc = ipr_probe_ioa_part2(ioa_cfg);
7590
7591 if (rc) {
7592 __ipr_remove(pdev);
7593 return rc;
7594 }
7595
7596 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
7597
7598 if (rc) {
7599 __ipr_remove(pdev);
7600 return rc;
7601 }
7602
7603 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7604 &ipr_trace_attr);
7605
7606 if (rc) {
7607 scsi_remove_host(ioa_cfg->host);
7608 __ipr_remove(pdev);
7609 return rc;
7610 }
7611
7612 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
7613 &ipr_dump_attr);
7614
7615 if (rc) {
7616 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
7617 &ipr_trace_attr);
7618 scsi_remove_host(ioa_cfg->host);
7619 __ipr_remove(pdev);
7620 return rc;
7621 }
7622
7623 scsi_scan_host(ioa_cfg->host);
7624 ipr_scan_vsets(ioa_cfg);
7625 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
7626 ioa_cfg->allow_ml_add_del = 1;
brking@us.ibm.com11cd8f12005-11-01 17:00:11 -06007627 ioa_cfg->host->max_channel = IPR_VSET_BUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007628 schedule_work(&ioa_cfg->work_q);
7629 return 0;
7630}
7631
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Let any in-flight reset/reload complete first; the host lock
	 * must be dropped while sleeping on the wait queue. */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Issue a normal shutdown and block until the resulting
	 * bringdown finishes, so the cache flush completes before the
	 * system powers off. */
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
7658
/*
 * PCI IDs of all supported adapters. The driver_data field carries
 * IPR_USE_LONG_TRANSOP_TIMEOUT for adapters that need the longer
 * transition-to-operational timeout (consumed in ipr_probe_ioa()).
 */
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT},
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
7720
/* PCI error recovery callbacks; only detection and slot reset are
 * implemented here. */
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
7725
/* PCI driver glue: binds the probe/remove/shutdown entry points and the
 * error handlers to the device ID table above. NOTE(review):
 * dynids.use_driver_data presumably makes dynamically added IDs carry
 * driver_data flags too — confirm against the PCI dynids code. */
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
	.dynids.use_driver_data = 1
};
7735
7736/**
7737 * ipr_init - Module entry point
7738 *
7739 * Return value:
7740 * 0 on success / negative value on failure
7741 **/
7742static int __init ipr_init(void)
7743{
7744 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
7745 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
7746
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -07007747 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748}
7749
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point. Unregisters the PCI driver, which detaches
 * every bound adapter via ipr_remove().
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}
7762
/* Module load/unload registration with the kernel module loader. */
module_init(ipr_init);
module_exit(ipr_exit);