blob: a64fb50858824c42fab0117ed57a765d20f59ca1 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
Brian King35a39692006-09-25 12:39:20 -050073#include <linux/libata.h>
Brian King0ce3a7e2008-07-11 13:37:50 -050074#include <linux/hdreg.h>
Wayne Boyerf72919e2010-02-19 13:24:21 -080075#include <linux/reboot.h>
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -080076#include <linux/stringify.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#include <asm/io.h>
78#include <asm/irq.h>
79#include <asm/processor.h>
80#include <scsi/scsi.h>
81#include <scsi/scsi_host.h>
82#include <scsi/scsi_tcq.h>
83#include <scsi/scsi_eh.h>
84#include <scsi/scsi_cmnd.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include "ipr.h"
86
/*
 * Global Data
 */
/* List of all registered adapter configs; guarded by ipr_driver_lock. */
static LIST_HEAD(ipr_ioa_head);
/* Tunables below are exposed as module parameters (see MODULE_PARM_DESC). */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;		/* index into ipr_max_bus_speeds[] */
static int ipr_testmode = 0;			/* allow unsupported configurations */
static unsigned int ipr_fastfail = 0;		/* reduce timeouts and retries */
static unsigned int ipr_transop_timeout = 0;	/* 0 => use driver default */
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);	/* protects ipr_ioa_head */
100
/* This table describes the differences between DMA controller chips */
/*
 * Per-chip-family register layout: mailbox offset, PCI cache line size,
 * and the interrupt-register offsets (anonymous struct member initialized
 * positionally).  Entries are referenced by index from ipr_chip[] below.
 */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.sense_interrupt_mask_reg = 0x00010,
			.clr_interrupt_reg = 0x00008,
			.sense_interrupt_reg = 0x00000,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg = 0x00020,
			.clr_uproc_interrupt_reg = 0x00028,
			/* CRoC additionally exposes dump registers */
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};
151
/*
 * PCI vendor/device IDs mapped to interrupt mode (LSI vs MSI), SIS
 * interface level (32 vs 64 bit), and the register layout from
 * ipr_chip_cfg[] above.
 */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
};
161
/* SCSI bus rate limits indexed by the max_speed module parameter (0-2). */
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
165
/*
 * Module parameters and modinfo metadata.  Parameters registered with
 * permission 0 are load-time only; fastfail and debug use
 * S_IRUGO | S_IWUSR and so may also be changed at runtime via sysfs.
 */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
187
/* A constant array of IOASCs/URCs/Error Messages */
/*
 * Each entry is {IOASC, flag, log level, message}.  Entries are keyed by
 * the adapter-returned IOASC; the message prefix (e.g. "9024:") is the
 * URC shown to the operator.  NOTE(review): exact field semantics are
 * defined by struct ipr_error_table_t in ipr.h — the second field appears
 * to request additional IOASA logging; confirm against ipr.h before
 * relying on it.
 */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};
444
/*
 * Known SCSI enclosure services devices: {product ID, compare mask,
 * max bus speed in MB/s}.  '*' in the mask is a wildcard position —
 * NOTE(review): matching semantics live in the lookup helper elsewhere
 * in this file; confirm there before editing masks.
 */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
460
/*
 * Function Prototypes
 */
/* Forward declarations for routines referenced before their definitions. */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
470
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Records the command opcode, (S)ATA opcode, command index, resource
 * handle and caller-supplied data into the adapter's in-memory trace
 * ring for later dump/debug.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Post-increment advances the ring cursor; presumably trace_index's
	 * type width matches the trace array size so it wraps naturally —
	 * TODO confirm against the declarations in ipr.h. */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* SIS64 keeps the ATA register file in a different location than SIS32 */
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
/* Tracing compiled out: hook collapses to a no-op statement. */
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
502
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Clears the command packet, transfer lengths, IOADL lengths and
 * completion status, re-points the IOADL address at this command's own
 * DMA region (64-bit form for SIS64, 32-bit read/write pair otherwise),
 * and drops any association with a previous SCSI command or ATA queued
 * command.
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	/* IOADL address is a fixed offset into this command's DMA block;
	 * the adapter consumes it big-endian, hence cpu_to_be64/32. */
	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	/* Reset completion status from any prior use of this block */
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
	ioasa->u.gata.status = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
540
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Performs the full first-use initialization of a command block:
 * the reusable fields via ipr_reinit_ipr_cmnd(), plus the scratch
 * area, sibling pointer and per-command timer that the reinit path
 * intentionally leaves alone.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
555
556/**
557 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
558 * @ioa_cfg: ioa config struct
559 *
560 * Return value:
561 * pointer to ipr command struct
562 **/
563static
564struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
565{
566 struct ipr_cmnd *ipr_cmd;
567
568 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
569 list_del(&ipr_cmd->queue);
570 ipr_init_ipr_cmnd(ipr_cmd);
571
572 return ipr_cmd;
573}
574
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask.  The order matters: new interrupts
 * are disabled in software and hardware before any pending ones are
 * acknowledged.
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	/* Read back — presumably to flush the posted MMIO writes so the
	 * mask/clear have taken effect before we return (standard PCI
	 * write-posting flush); the value itself is discarded */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
601
602/**
603 * ipr_save_pcix_cmd_reg - Save PCI-X command register
604 * @ioa_cfg: ioa config struct
605 *
606 * Return value:
607 * 0 on success / -EIO on failure
608 **/
609static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
610{
611 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
612
Brian King7dce0e12007-01-23 11:25:30 -0600613 if (pcix_cmd_reg == 0)
614 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615
616 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
617 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
618 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
619 return -EIO;
620 }
621
622 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
623 return 0;
624}
625
626/**
627 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
628 * @ioa_cfg: ioa config struct
629 *
630 * Return value:
631 * 0 on success / -EIO on failure
632 **/
633static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
634{
635 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
636
637 if (pcix_cmd_reg) {
638 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
639 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
640 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
641 return -EIO;
642 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 }
644
645 return 0;
646}
647
648/**
Brian King35a39692006-09-25 12:39:20 -0500649 * ipr_sata_eh_done - done function for aborted SATA commands
650 * @ipr_cmd: ipr command struct
651 *
652 * This function is invoked for ops generated to SATA
653 * devices which are being aborted.
654 *
655 * Return value:
656 * none
657 **/
658static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
659{
660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
661 struct ata_queued_cmd *qc = ipr_cmd->qc;
662 struct ipr_sata_port *sata_port = qc->ap->private_data;
663
664 qc->err_mask |= AC_ERR_OTHER;
665 sata_port->ioasa.status |= ATA_BUSY;
666 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
667 ata_qc_complete(qc);
668}
669
670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 * ipr_scsi_eh_done - mid-layer done function for aborted ops
672 * @ipr_cmd: ipr command struct
673 *
674 * This function is invoked by the interrupt handler for
675 * ops generated by the SCSI mid-layer which are being aborted.
676 *
677 * Return value:
678 * none
679 **/
680static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
681{
682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
683 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
684
685 scsi_cmd->result |= (DID_ERROR << 16);
686
FUJITA Tomonori63015bc2007-05-26 00:26:59 +0900687 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 scsi_cmd->scsi_done(scsi_cmd);
689 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
690}
691
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops: each command on the pending
 * queue has its response set to "IOA was reset", its done routine
 * redirected to the appropriate SCSI/SATA error-handling completion,
 * its timer cancelled, and its done routine invoked.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	/* _safe variant: done routines below re-queue the entry */
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		/* Fabricate an adapter response indicating a reset */
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		/* Route to the matching EH completion; internal ops keep
		 * whatever done routine they were issued with */
		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		/* Cancel the op timer before completing, so the timeout
		 * handler cannot run on an already-completed command */
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}
724
725/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800726 * ipr_send_command - Send driver initiated requests.
727 * @ipr_cmd: ipr command struct
728 *
729 * This function sends a command to the adapter using the correct write call.
730 * In the case of sis64, calculate the ioarcb size required. Then or in the
731 * appropriate bits.
732 *
733 * Return value:
734 * none
735 **/
736static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
737{
738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
739 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
740
741 if (ioa_cfg->sis64) {
742 /* The default size is 256 bytes */
743 send_dma_addr |= 0x1;
744
745 /* If the number of ioadls * size of ioadl > 128 bytes,
746 then use a 512 byte ioarcb */
747 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
748 send_dma_addr |= 0x4;
749 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
750 } else
751 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
752}
753
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Track the op as outstanding before handing it to the adapter */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	/* Arm the per-command timer; timeout_func fires if the adapter
	 * never completes the op within "timeout" jiffies */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Make all IOARCB/queue setup above globally visible before the
	 * MMIO write in ipr_send_command() makes the op live */
	mb();

	ipr_send_command(ipr_cmd);
}
789
790/**
791 * ipr_internal_cmd_done - Op done function for an internally generated op.
792 * @ipr_cmd: ipr command struct
793 *
794 * This function is the op done function for an internally generated,
795 * blocking op. It simply wakes the sleeping thread.
796 *
797 * Return value:
798 * none
799 **/
800static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
801{
802 if (ipr_cmd->sibling)
803 ipr_cmd->sibling = NULL;
804 else
805 complete(&ipr_cmd->completion);
806}
807
808/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800809 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
810 * @ipr_cmd: ipr command struct
811 * @dma_addr: dma address
812 * @len: transfer length
813 * @flags: ioadl flag value
814 *
815 * This function initializes an ioadl in the case where there is only a single
816 * descriptor.
817 *
818 * Return value:
819 * nothing
820 **/
821static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
822 u32 len, int flags)
823{
824 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
825 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
826
827 ipr_cmd->dma_use_sg = 1;
828
829 if (ipr_cmd->ioa_cfg->sis64) {
830 ioadl64->flags = cpu_to_be32(flags);
831 ioadl64->data_len = cpu_to_be32(len);
832 ioadl64->address = cpu_to_be64(dma_addr);
833
834 ipr_cmd->ioarcb.ioadl_len =
835 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
836 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
837 } else {
838 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
839 ioadl->address = cpu_to_be32(dma_addr);
840
841 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
842 ipr_cmd->ioarcb.read_ioadl_len =
843 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
844 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
845 } else {
846 ipr_cmd->ioarcb.ioadl_len =
847 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
848 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
849 }
850 }
851}
852
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout (in jiffies)
 *
 * Issues the command via ipr_do_req() with an internal completion as
 * the done routine, then sleeps until it finishes.  The caller is
 * expected to hold host->host_lock; it is dropped across the wait and
 * reacquired before returning.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Sleeping with the host lock held would deadlock the interrupt
	 * path that signals the completion — drop it across the wait */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
875
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		/* Track both the command and the hostrcb as outstanding */
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB: opcode, HCAM subtype, and the
		 * response buffer length in bytes 7/8 (big-endian split) */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		/* Single-descriptor DMA of the hostrcb buffer (adapter -> host) */
		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		/* Ensure the IOARCB is fully written before it goes live */
		mb();

		ipr_send_command(ipr_cmd);
	} else {
		/* HCAMs disabled (e.g. during reset): park the hostrcb */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
927
928/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800929 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800931 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 *
933 * Return value:
934 * none
935 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800936static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937{
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -0800938 switch(proto) {
939 case IPR_PROTO_SATA:
940 case IPR_PROTO_SAS_STP:
941 res->ata_class = ATA_DEV_ATA;
942 break;
943 case IPR_PROTO_SATA_ATAPI:
944 case IPR_PROTO_SAS_STP_ATAPI:
945 res->ata_class = ATA_DEV_ATAPI;
946 break;
947 default:
948 res->ata_class = ATA_DEV_UNKNOWN;
949 break;
950 };
951}
952
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Resets the bookkeeping flags and, for SIS64 adapters, assigns a
 * virtual bus/target for the resource, allocating target IDs out of
 * the per-category bitmaps (target_ids/array_ids/vset_ids).  Legacy
 * adapters take the bus/target/lun straight from the config table
 * entry.  Finishes by deriving the ATA class from the bus protocol.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		/* NOTE(review): lun is derived from res->dev_lun here, but
		 * dev_lun is only copied from the config entry further
		 * below (GENERIC_SCSI branch) — so this reads whatever was
		 * previously in dev_lun.  Confirm this ordering is
		 * intentional before relying on res->lun for new entries. */
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			/* Multipath devices share a target: reuse the ID of
			 * any existing resource with the same device ID */
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}
1039
1040/**
1041 * ipr_is_same_device - Determine if two devices are the same.
1042 * @res: resource entry struct
1043 * @cfgtew: config table entry wrapper struct
1044 *
1045 * Return value:
1046 * 1 if the devices are the same / 0 otherwise
1047 **/
1048static int ipr_is_same_device(struct ipr_resource_entry *res,
1049 struct ipr_config_table_entry_wrapper *cfgtew)
1050{
1051 if (res->ioa_cfg->sis64) {
1052 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1053 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1054 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1055 sizeof(cfgtew->u.cfgte64->lun))) {
1056 return 1;
1057 }
1058 } else {
1059 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1060 res->target == cfgtew->u.cfgte->res_addr.target &&
1061 res->lun == cfgtew->u.cfgte->res_addr.lun)
1062 return 1;
1063 }
1064
1065 return 0;
1066}
1067
1068/**
1069 * ipr_format_resource_path - Format the resource path for printing.
1070 * @res_path: resource path
1071 * @buf: buffer
1072 *
1073 * Return value:
1074 * pointer to buffer
1075 **/
1076static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1077{
1078 int i;
1079
1080 sprintf(buffer, "%02X", res_path[0]);
1081 for (i=1; res_path[i] != 0xff; i++)
Wayne Boyer4565e372010-02-19 13:24:07 -08001082 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001083
1084 return buffer;
1085}
1086
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Refreshes the resource entry from a new config table entry, handling
 * both the SIS64 and legacy layouts.  On SIS64 a change in the resource
 * path is detected and logged against the attached scsi device.
 * Finishes by re-deriving the ATA class from the bus protocol.
 *
 * Return value:
 * 	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		/* Detect a changed resource path so it can be reported */
		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		/* Only log when a scsi device is actually attached */
		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
1145
1146/**
1147 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1148 * for the resource.
1149 * @res: resource entry struct
1150 * @cfgtew: config table entry wrapper struct
1151 *
1152 * Return value:
1153 * none
1154 **/
1155static void ipr_clear_res_target(struct ipr_resource_entry *res)
1156{
1157 struct ipr_resource_entry *gscsi_res = NULL;
1158 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1159
1160 if (!ioa_cfg->sis64)
1161 return;
1162
1163 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1164 clear_bit(res->target, ioa_cfg->array_ids);
1165 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1166 clear_bit(res->target, ioa_cfg->vset_ids);
1167 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1168 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1169 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1170 return;
1171 clear_bit(res->target, ioa_cfg->target_ids);
1172
1173 } else if (res->bus == 0)
1174 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175}
1176
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Processes a configuration-change HCAM: locates (or allocates) the
 * resource entry matching the reported handle, refreshes it from the
 * config table entry, schedules mid-layer add/remove work as needed,
 * and re-issues the hostrcb to the adapter so the next change can be
 * reported.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	/* is_ndn: "new device notification" — set until we find a match */
	u32 is_ndn = 1;

	/* Point the wrapper at the right config-table layout */
	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		/* New device but no resource entries left: just re-arm the
		 * HCAM and drop this notification */
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			/* Mid-layer device attached: defer removal to the
			 * worker thread, invalidating the handle now */
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			/* Never exposed to the mid-layer: free immediately */
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	/* Re-arm the config-change HCAM */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
1245
1246/**
1247 * ipr_process_ccn - Op done function for a CCN.
1248 * @ipr_cmd: ipr command struct
1249 *
1250 * This function is the op done function for a configuration
1251 * change notification host controlled async from the adapter.
1252 *
1253 * Return value:
1254 * none
1255 **/
1256static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1257{
1258 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1259 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1260 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1261
1262 list_del(&hostrcb->queue);
1263 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1264
1265 if (ioasc) {
1266 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1267 dev_err(&ioa_cfg->pdev->dev,
1268 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1269
1270 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1271 } else {
1272 ipr_handle_config_change(ioa_cfg, hostrcb);
1273 }
1274}
1275
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer (last character to consider)
 * @buf:	string to modify
 *
 * Walks backwards over trailing blanks (stopping at index 0), then
 * writes a single space followed by a NUL terminator after the last
 * retained character.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	for (; i && buf[i] == ' '; i--)
		;

	buf[i + 1] = ' ';
	buf[i + 2] = '\0';
	return i + 2;
}
1295
1296/**
1297 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1298 * @prefix: string to print at start of printk
1299 * @hostrcb: hostrcb pointer
1300 * @vpd: vendor/product id/sn struct
1301 *
1302 * Return value:
1303 * none
1304 **/
1305static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1306 struct ipr_vpd *vpd)
1307{
1308 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1309 int i = 0;
1310
1311 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1312 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1313
1314 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1315 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1316
1317 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1318 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1319
1320 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1321}
1322
1323/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001325 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 *
1327 * Return value:
1328 * none
1329 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001330static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331{
1332 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1333 + IPR_SERIAL_NUM_LEN];
1334
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001335 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1336 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 IPR_PROD_ID_LEN);
1338 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1339 ipr_err("Vendor/Product ID: %s\n", buffer);
1340
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001341 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1343 ipr_err(" Serial Number: %s\n", buffer);
1344}
1345
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Logs the base VPD in compact form, then the world-wide name as a
 * second line with the same prefix.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
1362
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Logs the base VPD, then the two-word world-wide name on an
 * additional line.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
1376
1377/**
1378 * ipr_log_enhanced_cache_error - Log a cache error.
1379 * @ioa_cfg: ioa config struct
1380 * @hostrcb: hostrcb struct
1381 *
1382 * Return value:
1383 * none
1384 **/
1385static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1386 struct ipr_hostrcb *hostrcb)
1387{
Wayne Boyer4565e372010-02-19 13:24:07 -08001388 struct ipr_hostrcb_type_12_error *error;
1389
1390 if (ioa_cfg->sis64)
1391 error = &hostrcb->hcam.u.error64.u.type_12_error;
1392 else
1393 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001394
1395 ipr_err("-----Current Configuration-----\n");
1396 ipr_err("Cache Directory Card Information:\n");
1397 ipr_log_ext_vpd(&error->ioa_vpd);
1398 ipr_err("Adapter Card Information:\n");
1399 ipr_log_ext_vpd(&error->cfc_vpd);
1400
1401 ipr_err("-----Expected Configuration-----\n");
1402 ipr_err("Cache Directory Card Information:\n");
1403 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1404 ipr_err("Adapter Card Information:\n");
1405 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1406
1407 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1408 be32_to_cpu(error->ioa_data[0]),
1409 be32_to_cpu(error->ioa_data[1]),
1410 be32_to_cpu(error->ioa_data[2]));
1411}
1412
1413/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 * ipr_log_cache_error - Log a cache error.
1415 * @ioa_cfg: ioa config struct
1416 * @hostrcb: hostrcb struct
1417 *
1418 * Return value:
1419 * none
1420 **/
1421static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1422 struct ipr_hostrcb *hostrcb)
1423{
1424 struct ipr_hostrcb_type_02_error *error =
1425 &hostrcb->hcam.u.error.u.type_02_error;
1426
1427 ipr_err("-----Current Configuration-----\n");
1428 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001429 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001431 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
1433 ipr_err("-----Expected Configuration-----\n");
1434 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001435 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001437 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438
1439 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1440 be32_to_cpu(error->ioa_data[0]),
1441 be32_to_cpu(error->ioa_data[1]),
1442 be32_to_cpu(error->ioa_data[2]));
1443}
1444
1445/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001446 * ipr_log_enhanced_config_error - Log a configuration error.
1447 * @ioa_cfg: ioa config struct
1448 * @hostrcb: hostrcb struct
1449 *
1450 * Return value:
1451 * none
1452 **/
1453static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1454 struct ipr_hostrcb *hostrcb)
1455{
1456 int errors_logged, i;
1457 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1458 struct ipr_hostrcb_type_13_error *error;
1459
1460 error = &hostrcb->hcam.u.error.u.type_13_error;
1461 errors_logged = be32_to_cpu(error->errors_logged);
1462
1463 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1464 be32_to_cpu(error->errors_detected), errors_logged);
1465
1466 dev_entry = error->dev;
1467
1468 for (i = 0; i < errors_logged; i++, dev_entry++) {
1469 ipr_err_separator;
1470
1471 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1472 ipr_log_ext_vpd(&dev_entry->vpd);
1473
1474 ipr_err("-----New Device Information-----\n");
1475 ipr_log_ext_vpd(&dev_entry->new_vpd);
1476
1477 ipr_err("Cache Directory Card Information:\n");
1478 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1479
1480 ipr_err("Adapter Card Information:\n");
1481 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1482 }
1483}
1484
1485/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001486 * ipr_log_sis64_config_error - Log a device error.
1487 * @ioa_cfg: ioa config struct
1488 * @hostrcb: hostrcb struct
1489 *
1490 * Return value:
1491 * none
1492 **/
1493static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1494 struct ipr_hostrcb *hostrcb)
1495{
1496 int errors_logged, i;
1497 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1498 struct ipr_hostrcb_type_23_error *error;
1499 char buffer[IPR_MAX_RES_PATH_LENGTH];
1500
1501 error = &hostrcb->hcam.u.error64.u.type_23_error;
1502 errors_logged = be32_to_cpu(error->errors_logged);
1503
1504 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1505 be32_to_cpu(error->errors_detected), errors_logged);
1506
1507 dev_entry = error->dev;
1508
1509 for (i = 0; i < errors_logged; i++, dev_entry++) {
1510 ipr_err_separator;
1511
1512 ipr_err("Device %d : %s", i + 1,
1513 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1514 ipr_log_ext_vpd(&dev_entry->vpd);
1515
1516 ipr_err("-----New Device Information-----\n");
1517 ipr_log_ext_vpd(&dev_entry->new_vpd);
1518
1519 ipr_err("Cache Directory Card Information:\n");
1520 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1521
1522 ipr_err("Adapter Card Information:\n");
1523 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1524 }
1525}
1526
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs each type 03 (non-enhanced) device data entry: resource
 * address, old/new VPD, the cache directory and adapter card VPD,
 * and five words of additional IOA data.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	/* One device data entry per logged error.
	 * NOTE(review): errors_logged comes straight from the adapter and
	 * is not clamped against the size of error->dev[] -- confirm the
	 * overlay guarantees it fits. */
	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
1573
1574/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001575 * ipr_log_enhanced_array_error - Log an array configuration error.
1576 * @ioa_cfg: ioa config struct
1577 * @hostrcb: hostrcb struct
1578 *
1579 * Return value:
1580 * none
1581 **/
1582static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1583 struct ipr_hostrcb *hostrcb)
1584{
1585 int i, num_entries;
1586 struct ipr_hostrcb_type_14_error *error;
1587 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1588 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1589
1590 error = &hostrcb->hcam.u.error.u.type_14_error;
1591
1592 ipr_err_separator;
1593
1594 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1595 error->protection_level,
1596 ioa_cfg->host->host_no,
1597 error->last_func_vset_res_addr.bus,
1598 error->last_func_vset_res_addr.target,
1599 error->last_func_vset_res_addr.lun);
1600
1601 ipr_err_separator;
1602
1603 array_entry = error->array_member;
1604 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1605 sizeof(error->array_member));
1606
1607 for (i = 0; i < num_entries; i++, array_entry++) {
1608 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1609 continue;
1610
1611 if (be32_to_cpu(error->exposed_mode_adn) == i)
1612 ipr_err("Exposed Array Member %d:\n", i);
1613 else
1614 ipr_err("Array Member %d:\n", i);
1615
1616 ipr_log_ext_vpd(&array_entry->vpd);
1617 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1618 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1619 "Expected Location");
1620
1621 ipr_err_separator;
1622 }
1623}
1624
1625/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 * ipr_log_array_error - Log an array configuration error.
1627 * @ioa_cfg: ioa config struct
1628 * @hostrcb: hostrcb struct
1629 *
1630 * Return value:
1631 * none
1632 **/
1633static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1634 struct ipr_hostrcb *hostrcb)
1635{
1636 int i;
1637 struct ipr_hostrcb_type_04_error *error;
1638 struct ipr_hostrcb_array_data_entry *array_entry;
1639 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1640
1641 error = &hostrcb->hcam.u.error.u.type_04_error;
1642
1643 ipr_err_separator;
1644
1645 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1646 error->protection_level,
1647 ioa_cfg->host->host_no,
1648 error->last_func_vset_res_addr.bus,
1649 error->last_func_vset_res_addr.target,
1650 error->last_func_vset_res_addr.lun);
1651
1652 ipr_err_separator;
1653
1654 array_entry = error->array_member;
1655
1656 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001657 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 continue;
1659
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001660 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001662 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001665 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001667 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1668 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1669 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
1671 ipr_err_separator;
1672
1673 if (i == 9)
1674 array_entry = error->array_member2;
1675 else
1676 array_entry++;
1677 }
1678}
1679
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length, in bytes
 *
 * Dumps the buffer as four big-endian 32-bit words per line, each
 * line prefixed with its byte offset.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	/* At default verbosity, cap the dump so the log isn't flooded */
	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	/* i counts 32-bit words; i*4 is the byte offset printed.
	 * NOTE(review): if len is not a multiple of 16 bytes, the final
	 * iteration reads up to three words beyond len -- confirm callers
	 * only pass 16-byte-multiple lengths. */
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
1707
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason string, the remote IOA's VPD, and dumps the
 * remainder of the type 17 overlay as hex.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	/* The type 17 overlay lives in a different union member on SIS64 */
	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	/* Force NUL termination before trimming trailing whitespace */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	/* NOTE(review): prc is read from the 32-bit error overlay even on
	 * sis64 -- confirm the PRC field sits at the same offset in both
	 * layouts, or this logs garbage on 64-bit adapters. */
	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Whatever trails the fixed part of the overlay is dumped raw */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
1737
1738/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001739 * ipr_log_dual_ioa_error - Log a dual adapter error.
1740 * @ioa_cfg: ioa config struct
1741 * @hostrcb: hostrcb struct
1742 *
1743 * Return value:
1744 * none
1745 **/
1746static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1747 struct ipr_hostrcb *hostrcb)
1748{
1749 struct ipr_hostrcb_type_07_error *error;
1750
1751 error = &hostrcb->hcam.u.error.u.type_07_error;
1752 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001753 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001754
Brian King8cf093e2007-04-26 16:00:14 -05001755 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1756 be32_to_cpu(hostrcb->hcam.u.error.prc));
1757 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001758 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001759 be32_to_cpu(hostrcb->hcam.length) -
1760 (offsetof(struct ipr_hostrcb_error, u) +
1761 offsetof(struct ipr_hostrcb_type_07_error, data)));
1762}
1763
/* Maps the "active" bits of a fabric descriptor's path_state to text */
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

/* Maps the "state" bits of a fabric descriptor's path_state to text */
static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
1782
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Decodes the descriptor's path_state byte into its "active" and
 * "state" components and logs a human readable description. A
 * cascade or phy value of 0xff means "not applicable" and is
 * omitted from the message.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			/* Pick the format that includes only valid fields */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	/* No description matched: dump the raw path_state byte */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
1831
Wayne Boyer4565e372010-02-19 13:24:07 -08001832/**
1833 * ipr_log64_fabric_path - Log a fabric path error
1834 * @hostrcb: hostrcb struct
1835 * @fabric: fabric descriptor
1836 *
1837 * Return value:
1838 * none
1839 **/
1840static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1841 struct ipr_hostrcb64_fabric_desc *fabric)
1842{
1843 int i, j;
1844 u8 path_state = fabric->path_state;
1845 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1846 u8 state = path_state & IPR_PATH_STATE_MASK;
1847 char buffer[IPR_MAX_RES_PATH_LENGTH];
1848
1849 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1850 if (path_active_desc[i].active != active)
1851 continue;
1852
1853 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1854 if (path_state_desc[j].state != state)
1855 continue;
1856
1857 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1858 path_active_desc[i].desc, path_state_desc[j].desc,
1859 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1860 return;
1861 }
1862 }
1863
1864 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1865 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1866}
1867
/* Maps IPR_PATH_CFG_* element types to text */
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

/* Maps IPR_PATH_CFG_* element status values to text */
static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

/* SAS link rate descriptions, indexed by
 * cfg->link_rate & IPR_PHY_LINK_RATE_MASK (16 possible values) */
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
1908
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Decodes the element's type and status from type_status and logs a
 * description with phy, cascade, link rate and WWN. Cascade or phy
 * values of 0xff mean "not applicable" and are omitted.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	/* Nonexistent elements carry no useful information */
	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			/* IOA ports always report a phy; other element
			 * types include only the fields that are valid */
			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	/* No description matched: dump the raw type_status byte */
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
1975
1976/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001977 * ipr_log64_path_elem - Log a fabric path element.
1978 * @hostrcb: hostrcb struct
1979 * @cfg: fabric path element struct
1980 *
1981 * Return value:
1982 * none
1983 **/
1984static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
1985 struct ipr_hostrcb64_config_element *cfg)
1986{
1987 int i, j;
1988 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
1989 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1990 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1991 char buffer[IPR_MAX_RES_PATH_LENGTH];
1992
1993 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
1994 return;
1995
1996 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997 if (path_type_desc[i].type != type)
1998 continue;
1999
2000 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001 if (path_status_desc[j].status != status)
2002 continue;
2003
2004 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2005 path_status_desc[j].desc, path_type_desc[i].desc,
2006 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2007 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009 return;
2010 }
2011 }
2012 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2013 "WWN=%08X%08X\n", cfg->type_status,
2014 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2015 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2016 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2017}
2018
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Logs the failure reason, then walks the variable-length fabric
 * descriptors in the type 20 overlay, logging each path and its
 * config elements. Any trailing bytes are hex-dumped.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	/* Force NUL termination of the adapter-supplied string */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Total length of the variable-size descriptor area */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		/* Descriptors are variable length; step by each one's
		 * self-reported length and track what remains */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Whatever follows the last descriptor is dumped raw */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2055
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002056/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002057 * ipr_log_sis64_array_error - Log a sis64 array error.
2058 * @ioa_cfg: ioa config struct
2059 * @hostrcb: hostrcb struct
2060 *
2061 * Return value:
2062 * none
2063 **/
2064static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2065 struct ipr_hostrcb *hostrcb)
2066{
2067 int i, num_entries;
2068 struct ipr_hostrcb_type_24_error *error;
2069 struct ipr_hostrcb64_array_data_entry *array_entry;
2070 char buffer[IPR_MAX_RES_PATH_LENGTH];
2071 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2072
2073 error = &hostrcb->hcam.u.error64.u.type_24_error;
2074
2075 ipr_err_separator;
2076
2077 ipr_err("RAID %s Array Configuration: %s\n",
2078 error->protection_level,
2079 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2080
2081 ipr_err_separator;
2082
2083 array_entry = error->array_member;
2084 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2085 sizeof(error->array_member));
2086
2087 for (i = 0; i < num_entries; i++, array_entry++) {
2088
2089 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2090 continue;
2091
2092 if (error->exposed_mode_adn == i)
2093 ipr_err("Exposed Array Member %d:\n", i);
2094 else
2095 ipr_err("Array Member %d:\n", i);
2096
2097 ipr_err("Array Member %d:\n", i);
2098 ipr_log_ext_vpd(&array_entry->vpd);
2099 ipr_err("Current Location: %s",
2100 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2101 ipr_err("Expected Location: %s",
2102 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2103
2104 ipr_err_separator;
2105 }
2106}
2107
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * SIS64 counterpart of ipr_log_fabric_error(): logs the failure
 * reason, walks the variable-length 64-bit fabric descriptors in the
 * type 30 overlay, and hex-dumps any trailing bytes.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	/* Force NUL termination of the adapter-supplied string */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	/* Total length of the variable-size descriptor area */
	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		/* Step by each descriptor's self-reported length */
		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	/* Whatever follows the last descriptor is dumped raw */
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
2145
2146/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 * ipr_log_generic_error - Log an adapter error.
2148 * @ioa_cfg: ioa config struct
2149 * @hostrcb: hostrcb struct
2150 *
2151 * Return value:
2152 * none
2153 **/
2154static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2155 struct ipr_hostrcb *hostrcb)
2156{
Brian Kingac719ab2006-11-21 10:28:42 -06002157 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002158 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159}
2160
2161/**
2162 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2163 * @ioasc: IOASC
2164 *
2165 * This function will return the index of into the ipr_error_table
2166 * for the specified IOASC. If the IOASC is not in the table,
2167 * 0 will be returned, which points to the entry used for unknown errors.
2168 *
2169 * Return value:
2170 * index into the ipr_error_table
2171 **/
2172static u32 ipr_get_error(u32 ioasc)
2173{
2174 int i;
2175
2176 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002177 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 return i;
2179
2180 return 0;
2181}
2182
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system. It looks the
 * IOASC up in the error table, honors the configured log level, and
 * dispatches to the overlay-specific formatter.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	/* Only error log entry notifications are handled here */
	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	/* SIS64 adapters carry the failing device IOASC in a different
	 * union member of the HCAM */
	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	/* A log_hcam of 0 means this error is never worth logging */
	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	/* Skip the detailed dump when the configured log level is below
	 * this error's threshold */
	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	/* Clamp a bogus adapter-reported length to the raw buffer size */
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	/* Dispatch to the formatter matching the HCAM overlay id */
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		/* Unknown overlays are dumped raw */
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
2279
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 fd_ioasc;

	/* The failing device IOASC lives in a different union member on
	 * SIS64 adapters */
	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	/* Detach the HCAM from its pending queue and recycle the command */
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		/* Some errors can only be recovered by resetting the IOA */
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	/* Hand the HCAM buffer back to the adapter for the next event */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
2317
2318/**
2319 * ipr_timeout - An internally generated op has timed out.
2320 * @ipr_cmd: ipr command struct
2321 *
2322 * This function blocks host requests and initiates an
2323 * adapter reset.
2324 *
2325 * Return value:
2326 * none
2327 **/
2328static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2329{
2330 unsigned long lock_flags = 0;
2331 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2332
2333 ENTER;
2334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2335
2336 ioa_cfg->errors_logged++;
2337 dev_err(&ioa_cfg->pdev->dev,
2338 "Adapter being reset due to command timeout.\n");
2339
2340 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2341 ioa_cfg->sdt_state = GET_DUMP;
2342
2343 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2344 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2345
2346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2347 LEAVE;
2348}
2349
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	/* If a dump was pending, escalate so the reset path collects it */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		/* fastfail: bump retries by the full reload budget,
		 * presumably so further retries are skipped — confirm
		 * against IPR_NUM_RESET_RELOAD_RETRIES usage */
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
2384
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 *
 * NOTE(review): the unconditional spin_unlock_irq() implies the caller
 * must enter holding host->host_lock; the lock is dropped while
 * sleeping and reacquired before returning — confirm at call sites.
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	/* Kick off a reset only if one is not already in flight */
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	/* Sleep (lock dropped) until the reset/reload completes */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
2414
2415/**
2416 * ipr_find_ses_entry - Find matching SES in SES table
2417 * @res: resource entry struct of SES
2418 *
2419 * Return value:
2420 * pointer to SES table entry / NULL on failure
2421 **/
2422static const struct ipr_ses_table_entry *
2423ipr_find_ses_entry(struct ipr_resource_entry *res)
2424{
2425 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002426 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2428
2429 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2430 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2431 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002432 vpids = &res->std_inq_data.vpids;
2433 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 matches++;
2435 else
2436 break;
2437 } else
2438 matches++;
2439 }
2440
2441 if (matches == IPR_PROD_ID_LEN)
2442 return ste;
2443 }
2444
2445 return NULL;
2446}
2447
2448/**
2449 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2450 * @ioa_cfg: ioa config struct
2451 * @bus: SCSI bus
2452 * @bus_width: bus width
2453 *
2454 * Return value:
2455 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2456 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2457 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2458 * max 160MHz = max 320MB/sec).
2459 **/
2460static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2461{
2462 struct ipr_resource_entry *res;
2463 const struct ipr_ses_table_entry *ste;
2464 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2465
2466 /* Loop through each config table entry in the config table buffer */
2467 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002468 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 continue;
2470
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002471 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 continue;
2473
2474 if (!(ste = ipr_find_ses_entry(res)))
2475 continue;
2476
2477 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2478 }
2479
2480 return max_xfer_rate;
2481}
2482
2483/**
2484 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2485 * @ioa_cfg: ioa config struct
2486 * @max_delay: max delay in micro-seconds to wait
2487 *
2488 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2489 *
2490 * Return value:
2491 * 0 on success / other on failure
2492 **/
2493static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2494{
2495 volatile u32 pcii_reg;
2496 int delay = 1;
2497
2498 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2499 while (delay < max_delay) {
2500 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2501
2502 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2503 return 0;
2504
2505 /* udelay cannot be used if delay is more than a few milliseconds */
2506 if ((delay / 1000) > MAX_UDELAY_MS)
2507 mdelay(delay / 1000);
2508 else
2509 udelay(delay);
2510
2511 delay += delay;
2512 }
2513 return -EIO;
2514}
2515
2516/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002517 * ipr_get_sis64_dump_data_section - Dump IOA memory
2518 * @ioa_cfg: ioa config struct
2519 * @start_addr: adapter address to dump
2520 * @dest: destination kernel buffer
2521 * @length_in_words: length to dump in 4 byte words
2522 *
2523 * Return value:
2524 * 0 on success
2525 **/
2526static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2527 u32 start_addr,
2528 __be32 *dest, u32 length_in_words)
2529{
2530 int i;
2531
2532 for (i = 0; i < length_in_words; i++) {
2533 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2534 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2535 dest++;
2536 }
2537
2538 return 0;
2539}
2540
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Drives the LDUMP handshake with the adapter: each data word is read
 * through the mailbox register and acknowledged via the interrupt
 * registers.  On SIS64 adapters the dump register interface is used
 * instead.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* SIS64 adapters bypass the mailbox handshake entirely */
	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	/* NOTE(review): timeout here still returns 0, matching the
	 * documented "0 on success" only loosely — transfer did finish */
	return 0;
}
2631
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	/* Fill the dump one page at a time, allocating pages on demand
	 * and capping the overall dump at IPR_MAX_IOA_DUMP_SIZE */
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			/* Current page full (or first pass): grab another.
			 * GFP_ATOMIC: no sleeping in the allocator here */
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				/* Out of memory - return what we have so far */
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		/* Copy no more than remains of both the request and the page */
		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		/* The ldump handshake runs under the host lock; honor an
		 * abort request made while we were copying */
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		/* Yield between sections; a full dump can take a while */
		schedule();
	}

	return bytes_copied;
}
2697
2698/**
2699 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2700 * @hdr: dump entry header struct
2701 *
2702 * Return value:
2703 * nothing
2704 **/
2705static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2706{
2707 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2708 hdr->num_elems = 1;
2709 hdr->offset = sizeof(*hdr);
2710 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2711}
2712
2713/**
2714 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2715 * @ioa_cfg: ioa config struct
2716 * @driver_dump: driver dump struct
2717 *
2718 * Return value:
2719 * nothing
2720 **/
2721static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2722 struct ipr_driver_dump *driver_dump)
2723{
2724 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2725
2726 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2727 driver_dump->ioa_type_entry.hdr.len =
2728 sizeof(struct ipr_dump_ioa_type_entry) -
2729 sizeof(struct ipr_dump_entry_header);
2730 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2731 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2732 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2733 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2734 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2735 ucode_vpd->minor_release[1];
2736 driver_dump->hdr.num_entries++;
2737}
2738
2739/**
2740 * ipr_dump_version_data - Fill in the driver version in the dump.
2741 * @ioa_cfg: ioa config struct
2742 * @driver_dump: driver dump struct
2743 *
2744 * Return value:
2745 * nothing
2746 **/
2747static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2748 struct ipr_driver_dump *driver_dump)
2749{
2750 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2751 driver_dump->version_entry.hdr.len =
2752 sizeof(struct ipr_dump_version_entry) -
2753 sizeof(struct ipr_dump_entry_header);
2754 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2755 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2756 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2757 driver_dump->hdr.num_entries++;
2758}
2759
2760/**
2761 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2762 * @ioa_cfg: ioa config struct
2763 * @driver_dump: driver dump struct
2764 *
2765 * Return value:
2766 * nothing
2767 **/
2768static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2769 struct ipr_driver_dump *driver_dump)
2770{
2771 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2772 driver_dump->trace_entry.hdr.len =
2773 sizeof(struct ipr_dump_trace_entry) -
2774 sizeof(struct ipr_dump_entry_header);
2775 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2776 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2777 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2778 driver_dump->hdr.num_entries++;
2779}
2780
2781/**
2782 * ipr_dump_location_data - Fill in the IOA location in the dump.
2783 * @ioa_cfg: ioa config struct
2784 * @driver_dump: driver dump struct
2785 *
2786 * Return value:
2787 * nothing
2788 **/
2789static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2790 struct ipr_driver_dump *driver_dump)
2791{
2792 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2793 driver_dump->location_entry.hdr.len =
2794 sizeof(struct ipr_dump_location_entry) -
2795 sizeof(struct ipr_dump_entry_header);
2796 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2797 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002798 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 driver_dump->hdr.num_entries++;
2800}
2801
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Builds the driver portion of the dump (version, location, adapter
 * type and trace entries), then reads the adapter's Smart Dump Table
 * and copies each valid region of IOA memory into the dump buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* The dump may already have been completed or aborted */
	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	/* Mailbox holds the adapter address of the Smart Dump Table */
	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data. sdt represents the pointer
	 to the ioa generated dump table. Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	/* Drop the lock for the (slow) copy loop; ipr_sdt_copy retakes
	 * it around each individual section transfer */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			/* sis64: end_token is the byte count directly;
			 * otherwise derive length from start/end offsets */
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				/* Short copy means OOM or abort: stop here */
				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
/* Dump support compiled out: fetching the IOA dump becomes a no-op */
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
2945
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * kref release callback: detaches the dump from the adapter under the
 * host lock, then frees every captured page and the dump struct.
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Unhook from the adapter first so nothing can find the dump
	 * while we free it below */
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Release the pages accumulated during the dump copy */
	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
	LEAVE;
}
2972
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.  It also performs a requested
 * adapter dump before any device rescanning.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		/* Hold a reference so the dump survives while we work
		 * on it with the lock dropped */
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		/* Dump captured: reset the adapter to recover it */
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	/* First pass: remove deleted devices.  scsi_remove_device()
	 * requires dropping the host lock, so handle one device per
	 * iteration and rescan the list each time. */
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while(did_work);

	/* Second pass: surface newly added devices.  scsi_add_device()
	 * also drops the lock, so restart both passes afterwards. */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* Notify userspace that the device configuration changed */
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
3057
3058#ifdef CONFIG_SCSI_IPR_TRACE
3059/**
3060 * ipr_read_trace - Dump the adapter trace
3061 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003062 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 * @buf: buffer
3064 * @off: offset
3065 * @count: buffer size
3066 *
3067 * Return value:
3068 * number of bytes printed to buffer
3069 **/
Zhang Rui91a69022007-06-09 13:57:22 +08003070static ssize_t ipr_read_trace(struct kobject *kobj,
3071 struct bin_attribute *bin_attr,
3072 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073{
Tony Jonesee959b02008-02-22 00:13:36 +01003074 struct device *dev = container_of(kobj, struct device, kobj);
3075 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3077 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003078 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079
3080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003081 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3082 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003084
3085 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086}
3087
/* sysfs binary attribute: read-only "trace" file exposing the adapter
 * trace buffer via ipr_read_trace */
static struct bin_attribute ipr_trace_attr = {
	.attr = {
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
3097
3098/**
3099 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003100 * @dev: class device struct
3101 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 *
3103 * Return value:
3104 * number of bytes printed to buffer
3105 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003106static ssize_t ipr_show_fw_version(struct device *dev,
3107 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108{
Tony Jonesee959b02008-02-22 00:13:36 +01003109 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3111 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3112 unsigned long lock_flags = 0;
3113 int len;
3114
3115 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3116 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3117 ucode_vpd->major_release, ucode_vpd->card_type,
3118 ucode_vpd->minor_release[0],
3119 ucode_vpd->minor_release[1]);
3120 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3121 return len;
3122}
3123
/* sysfs attribute: read-only "fw_version" file */
static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name = "fw_version",
		.mode = S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
3131
3132/**
3133 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003134 * @dev: class device struct
3135 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003136 *
3137 * Return value:
3138 * number of bytes printed to buffer
3139 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003140static ssize_t ipr_show_log_level(struct device *dev,
3141 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142{
Tony Jonesee959b02008-02-22 00:13:36 +01003143 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3145 unsigned long lock_flags = 0;
3146 int len;
3147
3148 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3150 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3151 return len;
3152}
3153
3154/**
3155 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003156 * @dev: class device struct
3157 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 *
3159 * Return value:
3160 * number of bytes printed to buffer
3161 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003162static ssize_t ipr_store_log_level(struct device *dev,
3163 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 const char *buf, size_t count)
3165{
Tony Jonesee959b02008-02-22 00:13:36 +01003166 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3168 unsigned long lock_flags = 0;
3169
3170 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3171 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3172 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3173 return strlen(buf);
3174}
3175
/* sysfs attribute: read/write "log_level" file */
static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name = "log_level",
		.mode = S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
3184
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @attr:	device attribute struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Let any reset already in flight finish before starting ours */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		/* Reset never started - report failure */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Fail if another reset began or the adapter logged errors */
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
3237
/* sysfs attribute: write-only "run_diagnostics" trigger file */
static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name = "run_diagnostics",
		.mode = S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
3245
3246/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003247 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003248 * @class_dev: device struct
3249 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003250 *
3251 * Return value:
3252 * number of bytes printed to buffer
3253 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003254static ssize_t ipr_show_adapter_state(struct device *dev,
3255 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003256{
Tony Jonesee959b02008-02-22 00:13:36 +01003257 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003258 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3259 unsigned long lock_flags = 0;
3260 int len;
3261
3262 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3263 if (ioa_cfg->ioa_is_dead)
3264 len = snprintf(buf, PAGE_SIZE, "offline\n");
3265 else
3266 len = snprintf(buf, PAGE_SIZE, "online\n");
3267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268 return len;
3269}
3270
3271/**
3272 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003273 * @dev: device struct
3274 * @buf: buffer
3275 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003276 *
3277 * This function will change the adapter's state.
3278 *
3279 * Return value:
3280 * count on success / other on failure
3281 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003282static ssize_t ipr_store_adapter_state(struct device *dev,
3283 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003284 const char *buf, size_t count)
3285{
Tony Jonesee959b02008-02-22 00:13:36 +01003286 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003287 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3288 unsigned long lock_flags;
3289 int result = count;
3290
3291 if (!capable(CAP_SYS_ADMIN))
3292 return -EACCES;
3293
3294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3296 ioa_cfg->ioa_is_dead = 0;
3297 ioa_cfg->reset_retries = 0;
3298 ioa_cfg->in_ioa_bringdown = 0;
3299 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3300 }
3301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3303
3304 return result;
3305}
3306
Tony Jonesee959b02008-02-22 00:13:36 +01003307static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003308 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003309 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003310 .mode = S_IRUGO | S_IWUSR,
3311 },
3312 .show = ipr_show_adapter_state,
3313 .store = ipr_store_adapter_state
3314};
3315
3316/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003318 * @dev: device struct
3319 * @buf: buffer
3320 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 *
3322 * This function will reset the adapter.
3323 *
3324 * Return value:
3325 * count on success / other on failure
3326 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003327static ssize_t ipr_store_reset_adapter(struct device *dev,
3328 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329 const char *buf, size_t count)
3330{
Tony Jonesee959b02008-02-22 00:13:36 +01003331 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3333 unsigned long lock_flags;
3334 int result = count;
3335
3336 if (!capable(CAP_SYS_ADMIN))
3337 return -EACCES;
3338
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340 if (!ioa_cfg->in_reset_reload)
3341 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3344
3345 return result;
3346}
3347
Tony Jonesee959b02008-02-22 00:13:36 +01003348static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 .attr = {
3350 .name = "reset_host",
3351 .mode = S_IWUSR,
3352 },
3353 .store = ipr_store_reset_adapter
3354};
3355
3356/**
3357 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3358 * @buf_len: buffer length
3359 *
3360 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3361 * list to use for microcode download
3362 *
3363 * Return value:
3364 * pointer to sglist / NULL on failure
3365 **/
3366static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3367{
3368 int sg_size, order, bsize_elem, num_elem, i, j;
3369 struct ipr_sglist *sglist;
3370 struct scatterlist *scatterlist;
3371 struct page *page;
3372
3373 /* Get the minimum size per scatter/gather element */
3374 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3375
3376 /* Get the actual size per element */
3377 order = get_order(sg_size);
3378
3379 /* Determine the actual number of bytes per element */
3380 bsize_elem = PAGE_SIZE * (1 << order);
3381
3382 /* Determine the actual number of sg entries needed */
3383 if (buf_len % bsize_elem)
3384 num_elem = (buf_len / bsize_elem) + 1;
3385 else
3386 num_elem = buf_len / bsize_elem;
3387
3388 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003389 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 (sizeof(struct scatterlist) * (num_elem - 1)),
3391 GFP_KERNEL);
3392
3393 if (sglist == NULL) {
3394 ipr_trace;
3395 return NULL;
3396 }
3397
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003399 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
3401 sglist->order = order;
3402 sglist->num_sg = num_elem;
3403
3404 /* Allocate a bunch of sg elements */
3405 for (i = 0; i < num_elem; i++) {
3406 page = alloc_pages(GFP_KERNEL, order);
3407 if (!page) {
3408 ipr_trace;
3409
3410 /* Free up what we already allocated */
3411 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003412 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 kfree(sglist);
3414 return NULL;
3415 }
3416
Jens Axboe642f1492007-10-24 11:20:47 +02003417 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 }
3419
3420 return sglist;
3421}
3422
3423/**
3424 * ipr_free_ucode_buffer - Frees a microcode download buffer
3425 * @p_dnld: scatter/gather list pointer
3426 *
3427 * Free a DMA'able ucode download buffer previously allocated with
3428 * ipr_alloc_ucode_buffer
3429 *
3430 * Return value:
3431 * nothing
3432 **/
3433static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3434{
3435 int i;
3436
3437 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003438 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
3440 kfree(sglist);
3441}
3442
3443/**
3444 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3445 * @sglist: scatter/gather list pointer
3446 * @buffer: buffer pointer
3447 * @len: buffer length
3448 *
3449 * Copy a microcode image from a user buffer into a buffer allocated by
3450 * ipr_alloc_ucode_buffer
3451 *
3452 * Return value:
3453 * 0 on success / other on failure
3454 **/
3455static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3456 u8 *buffer, u32 len)
3457{
3458 int bsize_elem, i, result = 0;
3459 struct scatterlist *scatterlist;
3460 void *kaddr;
3461
3462 /* Determine the actual number of bytes per element */
3463 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3464
3465 scatterlist = sglist->scatterlist;
3466
3467 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003468 struct page *page = sg_page(&scatterlist[i]);
3469
3470 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003472 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473
3474 scatterlist[i].length = bsize_elem;
3475
3476 if (result != 0) {
3477 ipr_trace;
3478 return result;
3479 }
3480 }
3481
3482 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003483 struct page *page = sg_page(&scatterlist[i]);
3484
3485 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003487 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488
3489 scatterlist[i].length = len % bsize_elem;
3490 }
3491
3492 sglist->buffer_len = len;
3493 return result;
3494}
3495
3496/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003497 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3498 * @ipr_cmd: ipr command struct
3499 * @sglist: scatter/gather list
3500 *
3501 * Builds a microcode download IOA data list (IOADL).
3502 *
3503 **/
3504static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3505 struct ipr_sglist *sglist)
3506{
3507 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3508 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3509 struct scatterlist *scatterlist = sglist->scatterlist;
3510 int i;
3511
3512 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3513 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3514 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3515
3516 ioarcb->ioadl_len =
3517 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3518 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3519 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3520 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3521 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3522 }
3523
3524 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3525}
3526
3527/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003528 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 * @ipr_cmd: ipr command struct
3530 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003532 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003535static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3536 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003539 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 struct scatterlist *scatterlist = sglist->scatterlist;
3541 int i;
3542
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003543 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003545 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3546
3547 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3549
3550 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3551 ioadl[i].flags_and_data_len =
3552 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3553 ioadl[i].address =
3554 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3555 }
3556
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003557 ioadl[i-1].flags_and_data_len |=
3558 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3559}
3560
3561/**
3562 * ipr_update_ioa_ucode - Update IOA's microcode
3563 * @ioa_cfg: ioa config struct
3564 * @sglist: scatter/gather list
3565 *
3566 * Initiate an adapter reset to update the IOA's microcode
3567 *
3568 * Return value:
3569 * 0 on success / -EIO on failure
3570 **/
3571static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3572 struct ipr_sglist *sglist)
3573{
3574 unsigned long lock_flags;
3575
3576 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003577 while(ioa_cfg->in_reset_reload) {
3578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3579 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3580 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3581 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003582
3583 if (ioa_cfg->ucode_sglist) {
3584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3585 dev_err(&ioa_cfg->pdev->dev,
3586 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 return -EIO;
3588 }
3589
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003590 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3591 sglist->num_sg, DMA_TO_DEVICE);
3592
3593 if (!sglist->num_dma_sg) {
3594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595 dev_err(&ioa_cfg->pdev->dev,
3596 "Failed to map microcode download buffer!\n");
3597 return -EIO;
3598 }
3599
3600 ioa_cfg->ucode_sglist = sglist;
3601 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3603 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3604
3605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3606 ioa_cfg->ucode_sglist = NULL;
3607 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 return 0;
3609}
3610
3611/**
3612 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003613 * @class_dev: device struct
3614 * @buf: buffer
3615 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 *
3617 * This function will update the firmware on the adapter.
3618 *
3619 * Return value:
3620 * count on success / other on failure
3621 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003622static ssize_t ipr_store_update_fw(struct device *dev,
3623 struct device_attribute *attr,
3624 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625{
Tony Jonesee959b02008-02-22 00:13:36 +01003626 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3628 struct ipr_ucode_image_header *image_hdr;
3629 const struct firmware *fw_entry;
3630 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 char fname[100];
3632 char *src;
3633 int len, result, dnld_size;
3634
3635 if (!capable(CAP_SYS_ADMIN))
3636 return -EACCES;
3637
3638 len = snprintf(fname, 99, "%s", buf);
3639 fname[len-1] = '\0';
3640
3641 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3642 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3643 return -EIO;
3644 }
3645
3646 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3647
3648 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3649 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3650 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3651 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3652 release_firmware(fw_entry);
3653 return -EINVAL;
3654 }
3655
3656 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3657 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3658 sglist = ipr_alloc_ucode_buffer(dnld_size);
3659
3660 if (!sglist) {
3661 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3662 release_firmware(fw_entry);
3663 return -ENOMEM;
3664 }
3665
3666 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3667
3668 if (result) {
3669 dev_err(&ioa_cfg->pdev->dev,
3670 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003671 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 }
3673
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003674 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003676 if (!result)
3677 result = count;
3678out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 ipr_free_ucode_buffer(sglist);
3680 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003681 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682}
3683
Tony Jonesee959b02008-02-22 00:13:36 +01003684static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 .attr = {
3686 .name = "update_fw",
3687 .mode = S_IWUSR,
3688 },
3689 .store = ipr_store_update_fw
3690};
3691
Tony Jonesee959b02008-02-22 00:13:36 +01003692static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 &ipr_fw_version_attr,
3694 &ipr_log_level_attr,
3695 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003696 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 &ipr_ioa_reset_attr,
3698 &ipr_update_fw_attr,
3699 NULL,
3700};
3701
3702#ifdef CONFIG_SCSI_IPR_DUMP
3703/**
3704 * ipr_read_dump - Dump the adapter
3705 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003706 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 * @buf: buffer
3708 * @off: offset
3709 * @count: buffer size
3710 *
3711 * Return value:
3712 * number of bytes printed to buffer
3713 **/
Zhang Rui91a69022007-06-09 13:57:22 +08003714static ssize_t ipr_read_dump(struct kobject *kobj,
3715 struct bin_attribute *bin_attr,
3716 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717{
Tony Jonesee959b02008-02-22 00:13:36 +01003718 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 struct Scsi_Host *shost = class_to_shost(cdev);
3720 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3721 struct ipr_dump *dump;
3722 unsigned long lock_flags = 0;
3723 char *src;
3724 int len;
3725 size_t rc = count;
3726
3727 if (!capable(CAP_SYS_ADMIN))
3728 return -EACCES;
3729
3730 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3731 dump = ioa_cfg->dump;
3732
3733 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3734 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3735 return 0;
3736 }
3737 kref_get(&dump->kref);
3738 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3739
3740 if (off > dump->driver_dump.hdr.len) {
3741 kref_put(&dump->kref, ipr_release_dump);
3742 return 0;
3743 }
3744
3745 if (off + count > dump->driver_dump.hdr.len) {
3746 count = dump->driver_dump.hdr.len - off;
3747 rc = count;
3748 }
3749
3750 if (count && off < sizeof(dump->driver_dump)) {
3751 if (off + count > sizeof(dump->driver_dump))
3752 len = sizeof(dump->driver_dump) - off;
3753 else
3754 len = count;
3755 src = (u8 *)&dump->driver_dump + off;
3756 memcpy(buf, src, len);
3757 buf += len;
3758 off += len;
3759 count -= len;
3760 }
3761
3762 off -= sizeof(dump->driver_dump);
3763
3764 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3765 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3766 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3767 else
3768 len = count;
3769 src = (u8 *)&dump->ioa_dump + off;
3770 memcpy(buf, src, len);
3771 buf += len;
3772 off += len;
3773 count -= len;
3774 }
3775
3776 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3777
3778 while (count) {
3779 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3780 len = PAGE_ALIGN(off) - off;
3781 else
3782 len = count;
3783 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3784 src += off & ~PAGE_MASK;
3785 memcpy(buf, src, len);
3786 buf += len;
3787 off += len;
3788 count -= len;
3789 }
3790
3791 kref_put(&dump->kref, ipr_release_dump);
3792 return rc;
3793}
3794
3795/**
3796 * ipr_alloc_dump - Prepare for adapter dump
3797 * @ioa_cfg: ioa config struct
3798 *
3799 * Return value:
3800 * 0 on success / other on failure
3801 **/
3802static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3803{
3804 struct ipr_dump *dump;
3805 unsigned long lock_flags = 0;
3806
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003807 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808
3809 if (!dump) {
3810 ipr_err("Dump memory allocation failed\n");
3811 return -ENOMEM;
3812 }
3813
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 kref_init(&dump->kref);
3815 dump->ioa_cfg = ioa_cfg;
3816
3817 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3818
3819 if (INACTIVE != ioa_cfg->sdt_state) {
3820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3821 kfree(dump);
3822 return 0;
3823 }
3824
3825 ioa_cfg->dump = dump;
3826 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3827 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3828 ioa_cfg->dump_taken = 1;
3829 schedule_work(&ioa_cfg->work_q);
3830 }
3831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3832
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 return 0;
3834}
3835
3836/**
3837 * ipr_free_dump - Free adapter dump memory
3838 * @ioa_cfg: ioa config struct
3839 *
3840 * Return value:
3841 * 0 on success / other on failure
3842 **/
3843static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3844{
3845 struct ipr_dump *dump;
3846 unsigned long lock_flags = 0;
3847
3848 ENTER;
3849
3850 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3851 dump = ioa_cfg->dump;
3852 if (!dump) {
3853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3854 return 0;
3855 }
3856
3857 ioa_cfg->dump = NULL;
3858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3859
3860 kref_put(&dump->kref, ipr_release_dump);
3861
3862 LEAVE;
3863 return 0;
3864}
3865
3866/**
3867 * ipr_write_dump - Setup dump state of adapter
3868 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003869 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870 * @buf: buffer
3871 * @off: offset
3872 * @count: buffer size
3873 *
3874 * Return value:
3875 * number of bytes printed to buffer
3876 **/
Zhang Rui91a69022007-06-09 13:57:22 +08003877static ssize_t ipr_write_dump(struct kobject *kobj,
3878 struct bin_attribute *bin_attr,
3879 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880{
Tony Jonesee959b02008-02-22 00:13:36 +01003881 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 struct Scsi_Host *shost = class_to_shost(cdev);
3883 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3884 int rc;
3885
3886 if (!capable(CAP_SYS_ADMIN))
3887 return -EACCES;
3888
3889 if (buf[0] == '1')
3890 rc = ipr_alloc_dump(ioa_cfg);
3891 else if (buf[0] == '0')
3892 rc = ipr_free_dump(ioa_cfg);
3893 else
3894 return -EINVAL;
3895
3896 if (rc)
3897 return rc;
3898 else
3899 return count;
3900}
3901
/* sysfs binary "dump" attribute: root reads the captured adapter dump,
 * writes "1"/"0" to arm/release dump collection. */
static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
3911#else
/* No-op stub used when dump support is compiled out */
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3913#endif
3914
3915/**
3916 * ipr_change_queue_depth - Change the device's queue depth
3917 * @sdev: scsi device struct
3918 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07003919 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 *
3921 * Return value:
3922 * actual depth set
3923 **/
Mike Christiee881a172009-10-15 17:46:39 -07003924static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3925 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926{
Brian King35a39692006-09-25 12:39:20 -05003927 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3928 struct ipr_resource_entry *res;
3929 unsigned long lock_flags = 0;
3930
Mike Christiee881a172009-10-15 17:46:39 -07003931 if (reason != SCSI_QDEPTH_DEFAULT)
3932 return -EOPNOTSUPP;
3933
Brian King35a39692006-09-25 12:39:20 -05003934 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3935 res = (struct ipr_resource_entry *)sdev->hostdata;
3936
3937 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3938 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3942 return sdev->queue_depth;
3943}
3944
3945/**
3946 * ipr_change_queue_type - Change the device's queue type
3947 * @dsev: scsi device struct
3948 * @tag_type: type of tags to use
3949 *
3950 * Return value:
3951 * actual queue type set
3952 **/
3953static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3954{
3955 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3956 struct ipr_resource_entry *res;
3957 unsigned long lock_flags = 0;
3958
3959 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3960 res = (struct ipr_resource_entry *)sdev->hostdata;
3961
3962 if (res) {
3963 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3964 /*
3965 * We don't bother quiescing the device here since the
3966 * adapter firmware does it for us.
3967 */
3968 scsi_set_tag_type(sdev, tag_type);
3969
3970 if (tag_type)
3971 scsi_activate_tcq(sdev, sdev->queue_depth);
3972 else
3973 scsi_deactivate_tcq(sdev, sdev->queue_depth);
3974 } else
3975 tag_type = 0;
3976 } else
3977 tag_type = 0;
3978
3979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3980 return tag_type;
3981}
3982
3983/**
3984 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3985 * @dev: device struct
3986 * @buf: buffer
3987 *
3988 * Return value:
3989 * number of bytes printed to buffer
3990 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04003991static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992{
3993 struct scsi_device *sdev = to_scsi_device(dev);
3994 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3995 struct ipr_resource_entry *res;
3996 unsigned long lock_flags = 0;
3997 ssize_t len = -ENXIO;
3998
3999 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4000 res = (struct ipr_resource_entry *)sdev->hostdata;
4001 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004002 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004 return len;
4005}
4006
/* sysfs per-device "adapter_handle" attribute: root-readable resource handle */
static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
4014
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004015/**
4016 * ipr_show_resource_path - Show the resource path for this device.
4017 * @dev: device struct
4018 * @buf: buffer
4019 *
4020 * Return value:
4021 * number of bytes printed to buffer
4022 **/
4023static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4024{
4025 struct scsi_device *sdev = to_scsi_device(dev);
4026 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4027 struct ipr_resource_entry *res;
4028 unsigned long lock_flags = 0;
4029 ssize_t len = -ENXIO;
4030 char buffer[IPR_MAX_RES_PATH_LENGTH];
4031
4032 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4033 res = (struct ipr_resource_entry *)sdev->hostdata;
4034 if (res)
4035 len = snprintf(buf, PAGE_SIZE, "%s\n",
4036 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038 return len;
4039}
4040
/* sysfs per-device "resource_path" attribute: root-readable formatted path */
static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_resource_path
};
4048
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049static struct device_attribute *ipr_dev_attrs[] = {
4050 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004051 &ipr_resource_path_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 NULL,
4053};
4054
4055/**
4056 * ipr_biosparam - Return the HSC mapping
4057 * @sdev: scsi device struct
4058 * @block_device: block device pointer
4059 * @capacity: capacity of the device
4060 * @parm: Array containing returned HSC values.
4061 *
4062 * This function generates the HSC parms that fdisk uses.
4063 * We want to make sure we return something that places partitions
4064 * on 4k boundaries for best performance with the IOA.
4065 *
4066 * Return value:
4067 * 0 on success
4068 **/
4069static int ipr_biosparam(struct scsi_device *sdev,
4070 struct block_device *block_device,
4071 sector_t capacity, int *parm)
4072{
4073 int heads, sectors;
4074 sector_t cylinders;
4075
4076 heads = 128;
4077 sectors = 32;
4078
4079 cylinders = capacity;
4080 sector_div(cylinders, (128 * 32));
4081
4082 /* return result */
4083 parm[0] = heads;
4084 parm[1] = sectors;
4085 parm[2] = cylinders;
4086
4087 return 0;
4088}
4089
4090/**
Brian King35a39692006-09-25 12:39:20 -05004091 * ipr_find_starget - Find target based on bus/target.
4092 * @starget: scsi target struct
4093 *
4094 * Return value:
4095 * resource entry pointer if found / NULL if not found
4096 **/
4097static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4098{
4099 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4100 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4101 struct ipr_resource_entry *res;
4102
4103 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004104 if ((res->bus == starget->channel) &&
4105 (res->target == starget->id) &&
4106 (res->lun == 0)) {
Brian King35a39692006-09-25 12:39:20 -05004107 return res;
4108 }
4109 }
4110
4111 return NULL;
4112}
4113
/* ATA port template handed to ata_sas_port_alloc() in ipr_target_alloc();
 * tentative definition, initialized elsewhere in this file. */
static struct ata_port_info sata_port_info;
4115
4116/**
4117 * ipr_target_alloc - Prepare for commands to a SCSI target
4118 * @starget: scsi target struct
4119 *
4120 * If the device is a SATA device, this function allocates an
4121 * ATA port with libata, else it does nothing.
4122 *
4123 * Return value:
4124 * 0 on success / non-0 on failure
4125 **/
4126static int ipr_target_alloc(struct scsi_target *starget)
4127{
4128 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4129 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4130 struct ipr_sata_port *sata_port;
4131 struct ata_port *ap;
4132 struct ipr_resource_entry *res;
4133 unsigned long lock_flags;
4134
4135 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4136 res = ipr_find_starget(starget);
4137 starget->hostdata = NULL;
4138
4139 if (res && ipr_is_gata(res)) {
4140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4141 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4142 if (!sata_port)
4143 return -ENOMEM;
4144
4145 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4146 if (ap) {
4147 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4148 sata_port->ioa_cfg = ioa_cfg;
4149 sata_port->ap = ap;
4150 sata_port->res = res;
4151
4152 res->sata_port = sata_port;
4153 ap->private_data = sata_port;
4154 starget->hostdata = sata_port;
4155 } else {
4156 kfree(sata_port);
4157 return -ENOMEM;
4158 }
4159 }
4160 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4161
4162 return 0;
4163}
4164
4165/**
4166 * ipr_target_destroy - Destroy a SCSI target
4167 * @starget: scsi target struct
4168 *
4169 * If the device was a SATA device, this function frees the libata
4170 * ATA port, else it does nothing.
4171 *
4172 **/
4173static void ipr_target_destroy(struct scsi_target *starget)
4174{
4175 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004176 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4177 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4178
4179 if (ioa_cfg->sis64) {
4180 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4181 clear_bit(starget->id, ioa_cfg->array_ids);
4182 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4183 clear_bit(starget->id, ioa_cfg->vset_ids);
4184 else if (starget->channel == 0)
4185 clear_bit(starget->id, ioa_cfg->target_ids);
4186 }
Brian King35a39692006-09-25 12:39:20 -05004187
4188 if (sata_port) {
4189 starget->hostdata = NULL;
4190 ata_sas_port_destroy(sata_port->ap);
4191 kfree(sata_port);
4192 }
4193}
4194
4195/**
4196 * ipr_find_sdev - Find device based on bus/target/lun.
4197 * @sdev: scsi device struct
4198 *
4199 * Return value:
4200 * resource entry pointer if found / NULL if not found
4201 **/
4202static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4203{
4204 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4205 struct ipr_resource_entry *res;
4206
4207 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004208 if ((res->bus == sdev->channel) &&
4209 (res->target == sdev->id) &&
4210 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004211 return res;
4212 }
4213
4214 return NULL;
4215}
4216
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev: scsi device struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		/* Quiesce the libata port first if this was a SATA device */
		if (res->sata_port)
			ata_port_disable(res->sata_port->ap);
		/* Break the sdev <-> resource entry association both ways */
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
4243
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev: scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			/* RAID-member/IOA resources are driver-internal:
			 * keep upper-layer drivers from binding to them */
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			/* Volume sets need a longer request timeout and a
			 * larger maximum I/O size */
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		/* Drop the lock before the queue-depth/libata calls below */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			/* SATA devices get a deeper, libata-configured queue */
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
4294
4295/**
Brian King35a39692006-09-25 12:39:20 -05004296 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4297 * @sdev: scsi device struct
4298 *
4299 * This function initializes an ATA port so that future commands
4300 * sent through queuecommand will work.
4301 *
4302 * Return value:
4303 * 0 on success
4304 **/
4305static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4306{
4307 struct ipr_sata_port *sata_port = NULL;
4308 int rc = -ENXIO;
4309
4310 ENTER;
4311 if (sdev->sdev_target)
4312 sata_port = sdev->sdev_target->hostdata;
4313 if (sata_port)
4314 rc = ata_sas_port_init(sata_port->ap);
4315 if (rc)
4316 ipr_slave_destroy(sdev);
4317
4318 LEAVE;
4319 return rc;
4320}
4321
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev: scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			/* SATA device: drop the lock and hand off to the
			 * ATA-specific allocation path */
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
4364
/**
 * __ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd: scsi command struct
 *
 * Caller must hold the host lock (see ipr_eh_host_reset).
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	/* If a dump was pending on this adapter, start collecting it now */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	LEAVE;
	return rc;
}
4391
Jeff Garzik df0ae242005-05-28 07:57:14 -04004392static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4393{
4394 int rc;
4395
4396 spin_lock_irq(cmd->device->host->host_lock);
4397 rc = __ipr_eh_host_reset(cmd);
4398 spin_unlock_irq(cmd->device->host->host_lock);
4399
4400 return rc;
4401}
4402
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg: ioa config struct
 * @res: resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ENTER;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	/* The ATA register block lives in a different spot in the 64-bit
	 * (SIS64) IOARCB layout than in the legacy layout */
	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		/* SATA: request a PHY reset rather than a LUN/target reset */
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	/* Save the ATA result registers for libata, unless the whole
	 * adapter was reset (then they are not valid) */
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
4456
/**
 * ipr_sata_reset - Reset the SATA port
 * @link: SATA link to reset
 * @classes: class of the attached device
 * @deadline: unused here; part of the libata reset callback signature
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait out any in-progress adapter reset, dropping the lock while
	 * sleeping so the reset job can make progress */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		/* Report the device class back to libata */
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
	return rc;
}
4494
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd: scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Caller must hold the host lock (see ipr_eh_dev_reset).
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	/* Route completion of every command outstanding to this device
	 * through the appropriate EH done handler; mark in-flight ATA
	 * commands failed so libata's EH picks them up */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;
			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
			}
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		/* libata's error handler must run without the host lock */
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		/* Anything still pending on this device means the reset failed */
		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				rc = -EIO;
				break;
			}
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	LEAVE;
	return (rc ? FAILED : SUCCESS);
}
4566
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004567static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4568{
4569 int rc;
4570
4571 spin_lock_irq(cmd->device->host->host_lock);
4572 rc = __ipr_eh_dev_reset(cmd);
4573 spin_unlock_irq(cmd->device->host->host_lock);
4574
4575 return rc;
4576}
4577
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd: ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	ENTER;
	/* On non-SIS64 adapters, tell the midlayer which bus was reset */
	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	LEAVE;
}
4613
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd: ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Nothing to do if the abort already completed or a full adapter
	 * reset is underway */
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	/* Link the abort and reset commands so that whichever completes
	 * last wakes the sleeping eh thread (see ipr_bus_reset_done) */
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
4653
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd: scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ENTER;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	/* Cancel is only issued for generic SCSI resources */
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	/* Find the command and route its completion through the eh done
	 * handler; if it is no longer pending, it already completed */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	/* Build and send a Cancel All Requests to the device */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
		ipr_trace;
	}

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	LEAVE;
	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
4724
4725/**
4726 * ipr_eh_abort - Abort a single op
4727 * @scsi_cmd: scsi command struct
4728 *
4729 * Return value:
4730 * SUCCESS / FAILED
4731 **/
4732static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4733{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004734 unsigned long flags;
4735 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736
4737 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004739 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4740 rc = ipr_cancel_op(scsi_cmd);
4741 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742
4743 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004744 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745}
4746
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg: ioa config struct
 * @int_reg: interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Adapter transitioned to operational: ack the interrupt
		 * and advance the reset job */
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		/* Unrecoverable condition: record it, optionally start a
		 * dump, then silence and reset the adapter */
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
4787
4788/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004789 * ipr_isr_eh - Interrupt service routine error handler
4790 * @ioa_cfg: ioa config struct
4791 * @msg: message to log
4792 *
4793 * Return value:
4794 * none
4795 **/
4796static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4797{
4798 ioa_cfg->errors_logged++;
4799 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4800
4801 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4802 ioa_cfg->sdt_state = GET_DUMP;
4803
4804 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4805}
4806
/**
 * ipr_isr - Interrupt service routine
 * @irq: irq number
 * @devp: pointer to ioa config struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	int num_hrrq = 0;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		/* Drain the host request/response queue: an entry is valid
		 * while its toggle bit matches our current toggle bit */
		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			/* A handle outside the command block table is fatal */
			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			/* Complete the command */
			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			/* Advance to the next HRRQ entry, flipping the
			 * toggle bit each time the queue wraps */
			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			do {
				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

			/* HRRQ-updated bit refused to clear: reset the adapter */
			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

		} else
			break;
	}

	/* Nothing on the HRRQ: this must be one of the "other" interrupts */
	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
4902
4903/**
Wayne Boyera32c0552010-02-19 13:23:36 -08004904 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905 * @ioa_cfg: ioa config struct
4906 * @ipr_cmd: ipr command struct
4907 *
4908 * Return value:
4909 * 0 on success / -1 on failure
4910 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08004911static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4912 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09004914 int i, nseg;
4915 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916 u32 length;
4917 u32 ioadl_flags = 0;
4918 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4919 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08004920 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09004922 length = scsi_bufflen(scsi_cmd);
4923 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 return 0;
4925
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09004926 nseg = scsi_dma_map(scsi_cmd);
4927 if (nseg < 0) {
4928 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4929 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 }
4931
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09004932 ipr_cmd->dma_use_sg = nseg;
4933
4934 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4935 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4936 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08004937 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
4938 ioadl_flags = IPR_IOADL_FLAGS_READ;
4939
4940 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4941 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
4942 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
4943 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
4944 }
4945
4946 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4947 return 0;
4948}
4949
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg: ioa config struct
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)		/* no data transfer: nothing to map */
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	/* Record direction, transfer length and IOADL size in the request
	 * control block (separate field pairs for write vs. read) */
	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	/* If the list is short enough, embed it directly in the IOARCB to
	 * save the adapter an extra DMA fetch */
	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	/* One descriptor per mapped scatter/gather element */
	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	/* Mark the final descriptor so the adapter knows the list ends */
	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5010
5011/**
5012 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5013 * @scsi_cmd: scsi command struct
5014 *
5015 * Return value:
5016 * task attributes
5017 **/
5018static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5019{
5020 u8 tag[2];
5021 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5022
5023 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5024 switch (tag[0]) {
5025 case MSG_SIMPLE_TAG:
5026 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5027 break;
5028 case MSG_HEAD_TAG:
5029 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5030 break;
5031 case MSG_ORDERED_TAG:
5032 rc = IPR_FLAGS_LO_ORDERED_TASK;
5033 break;
5034 };
5035 }
5036
5037 return rc;
5038}
5039
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Called when the error-recovery sequence (cancel all / request sense)
 * for a failed op finishes. On request-sense failure the original
 * command is completed with DID_ERROR; otherwise the sense data
 * gathered by ERP is handed back to the midlayer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		/* The request sense itself failed; fail the original op */
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		/* NACA models manage their own sync-complete semantics */
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	/* Unmap DMA before recycling the command block to the free list */
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5075
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Clears the command packet, transfer lengths, IOADL lengths and the
 * prior completion status so the same command block can be re-issued
 * for an error-recovery request, then re-points the IOADL address at
 * the block's own descriptor area (64-bit or 32-bit SIS layout).
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	/* Wipe stale completion status from the previous use of this block */
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		/* 64-bit SIS: single IOADL address field in the IOARCB */
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		/* 32-bit SIS: read and write IOADL pointers share the list */
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
5106
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition. If the preceding ERP step itself failed
 * (sense key set in the IOASC), ERP is finished immediately via
 * ipr_erp_done() instead of issuing the REQUEST SENSE CDB.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		/* Previous ERP step failed; give up and complete the op */
		ipr_erp_done(ipr_cmd);
		return;
	}

	/* Reuse the same command block for the REQUEST SENSE */
	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
5142
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * For untagged devices the cancel-all step is skipped and ERP goes
 * straight to the request-sense stage.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	/* Mark the device as in error recovery; cleared in ipr_erp_done() */
	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		/* Untagged device: nothing queued to cancel */
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	/* On completion, continue ERP with a request sense */
	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
5177
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Logging is gated by the adapter's configured log level: below the
 * default level nothing is logged; below the max level only errors
 * the IOA has not already logged, on generic SCSI devices, with a
 * log-worthy entry in the error table, are reported.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	/* For bus resets, the failing-device IOASC is the more useful one */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	/* Clamp the dump length to the size of the IOASA structure */
	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	/* Dump four 32-bit words per line, labeled with the byte offset */
	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
5244
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Synthesizes sense data in the scsi_cmd's sense buffer from the
 * adapter's IOASA status. Descriptor format (0x72) is used for vset
 * devices reporting a failing LBA above 32 bits; fixed format (0x70)
 * otherwise. Driver-generated IOASCs are left without sense data.
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* Driver-generated IOASCs carry no device sense data */
	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		/* Descriptor-format sense (0x72): 64-bit failing LBA */
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;	/* additional sense length */
		sense_buf[8] = 0;	/* descriptor type: information */
		sense_buf[9] = 0x0A;	/* descriptor length */
		sense_buf[10] = 0x80;	/* valid bit */

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		/* High 32 bits of the failing LBA, big-endian */
		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		/* Low 32 bits of the failing LBA, big-endian */
		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		/* Fixed-format sense (0x70) */
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			/* Sense-key-specific field pointer */
			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
5335
5336/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005337 * ipr_get_autosense - Copy autosense data to sense buffer
5338 * @ipr_cmd: ipr command struct
5339 *
5340 * This function copies the autosense buffer to the buffer
5341 * in the scsi_cmd, if there is autosense available.
5342 *
5343 * Return value:
5344 * 1 if autosense was available / 0 if not
5345 **/
5346static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5347{
5348 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5349
Brian King117d2ce2006-08-02 14:57:58 -05005350 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005351 return 0;
5352
5353 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5354 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5355 SCSI_SENSE_BUFFERSIZE));
5356 return 1;
5357}
5358
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Maps the masked IOASC onto a midlayer result code. Most paths
 * complete the command here; the check-condition path without
 * autosense hands off to ipr_erp_cancel_all() and returns without
 * completing (ERP will finish the op later).
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		/* Device is gone; complete through the EH done path */
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	/* Non-GSCSI devices get synthesized sense data from the IOASA */
	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		/* Pass the device's SCSI status through to the midlayer */
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					/* No autosense: fetch sense via ERP;
					 * the op completes later */
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	/* Unmap DMA before recycling the command block to the free list */
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
5452
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Successful ops (sense key 0) are completed directly; anything else
 * is routed through ipr_erp_start() for error processing.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* Propagate the adapter-reported residual byte count */
	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(ipr_cmd->scsi_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}
5478
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:		done function
 *
 * This function queues a request generated by the mid-layer.
 * SATA devices are diverted to libata via ata_sas_queuecmd();
 * everything else is built into an IOARCB and sent to the adapter.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		/* Dead adapter or unknown device: fail without queueing */
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	/* SATA devices go through libata's SAS bridge instead */
	if (ipr_is_gata(res) && res->sata_port)
		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			/* One-shot flag set by prior error recovery */
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	/* Vendor-specific CDB opcodes are issued as IOA commands */
	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0)) {
		/* Build the scatter/gather list in the SIS-appropriate format */
		if (ioa_cfg->sis64)
			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		else
			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
	}

	if (likely(rc == 0)) {
		/* Order descriptor writes before ringing the adapter */
		mb();
		ipr_send_command(ipr_cmd);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
5573
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Only SATA (GATA) resources accept ioctls, which are forwarded to
 * libata; HDIO_GET_IDENTITY is explicitly rejected. All other devices
 * get -EINVAL.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
5596
5597/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005598 * ipr_info - Get information about the card/driver
5599 * @scsi_host: scsi host struct
5600 *
5601 * Return value:
5602 * pointer to buffer with description string
5603 **/
5604static const char * ipr_ioa_info(struct Scsi_Host *host)
5605{
5606 static char buffer[512];
5607 struct ipr_ioa_cfg *ioa_cfg;
5608 unsigned long lock_flags = 0;
5609
5610 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5611
5612 spin_lock_irqsave(host->host_lock, lock_flags);
5613 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5614 spin_unlock_irqrestore(host->host_lock, lock_flags);
5615
5616 return buffer;
5617}
5618
/* SCSI midlayer host template: driver entry points and queue limits */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,		/* adapter has no SCSI ID on its own buses */
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
5646
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:	ata port to reset
 *
 * Waits out any in-progress adapter reset/reload (dropping the host
 * lock while sleeping), then issues a device reset and classifies the
 * attached device. The port is disabled if the reset fails or the
 * device class remains unknown.
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Drop the lock while waiting for reset/reload to finish */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ata_port_disable(ap);
		goto out_unlock;
	}

	/* Use the class the adapter discovered for the resource */
	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ata_port_disable(ap);

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
5686
/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * If the internal command is still on the adapter's pending queue,
 * reset the device to flush it. Any in-progress adapter reset/reload
 * is waited out first (dropping the host lock while sleeping).
 *
 * Return value:
 * 	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	/* Drop the lock while waiting for reset/reload to finish */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	/* Reset the device only if this qc is still outstanding */
	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
5716
5717/**
Brian King35a39692006-09-25 12:39:20 -05005718 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5719 * @regs: destination
5720 * @tf: source ATA taskfile
5721 *
5722 * Return value:
5723 * none
5724 **/
5725static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5726 struct ata_taskfile *tf)
5727{
5728 regs->feature = tf->feature;
5729 regs->nsect = tf->nsect;
5730 regs->lbal = tf->lbal;
5731 regs->lbam = tf->lbam;
5732 regs->lbah = tf->lbah;
5733 regs->device = tf->device;
5734 regs->command = tf->command;
5735 regs->hob_feature = tf->hob_feature;
5736 regs->hob_nsect = tf->hob_nsect;
5737 regs->hob_lbal = tf->hob_lbal;
5738 regs->hob_lbam = tf->hob_lbam;
5739 regs->hob_lbah = tf->hob_lbah;
5740 regs->ctl = tf->ctl;
5741}
5742
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Saves the GATA IOASA for later taskfile reads, reports a device
 * reset to the midlayer if the adapter performed one, sets the qc
 * error mask from the ATA status, and completes the qc.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/* Preserve the ATA status/registers for the port's tf_read path */
	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
	       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	/* Errors above RECOVERED also fold the host byte into the mask */
	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	else
		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}
5775
5776/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005777 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5778 * @ipr_cmd: ipr command struct
5779 * @qc: ATA queued command
5780 *
5781 **/
5782static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5783 struct ata_queued_cmd *qc)
5784{
5785 u32 ioadl_flags = 0;
5786 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5787 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5788 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5789 int len = qc->nbytes;
5790 struct scatterlist *sg;
5791 unsigned int si;
5792 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5793
5794 if (len == 0)
5795 return;
5796
5797 if (qc->dma_dir == DMA_TO_DEVICE) {
5798 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5799 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5800 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5801 ioadl_flags = IPR_IOADL_FLAGS_READ;
5802
5803 ioarcb->data_transfer_length = cpu_to_be32(len);
5804 ioarcb->ioadl_len =
5805 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5806 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5807 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5808
5809 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5810 ioadl64->flags = cpu_to_be32(ioadl_flags);
5811 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5812 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5813
5814 last_ioadl64 = ioadl64;
5815 ioadl64++;
5816 }
5817
5818 if (likely(last_ioadl64))
5819 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5820}
5821
/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 * Fills the 32-bit SIS IOADL descriptors from the qc's scatterlist
 * (assumed already DMA-mapped by libata) and marks the final entry
 * with the LAST flag. Zero-length transfers build no list.
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		/* Writes use the write length/IOADL length fields */
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		/* Reads use the separate read length/IOADL length fields */
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	/* Tag the final descriptor so the adapter knows the list ends */
	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
5866
5867/**
5868 * ipr_qc_issue - Issue a SATA qc to a device
5869 * @qc: queued command
5870 *
5871 * Return value:
5872 * 0 if success
5873 **/
5874static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5875{
5876 struct ata_port *ap = qc->ap;
5877 struct ipr_sata_port *sata_port = ap->private_data;
5878 struct ipr_resource_entry *res = sata_port->res;
5879 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5880 struct ipr_cmnd *ipr_cmd;
5881 struct ipr_ioarcb *ioarcb;
5882 struct ipr_ioarcb_ata_regs *regs;
5883
5884 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
Brian King0feeed82007-03-29 12:43:43 -05005885 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05005886
5887 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5888 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05005889
Wayne Boyera32c0552010-02-19 13:23:36 -08005890 if (ioa_cfg->sis64) {
5891 regs = &ipr_cmd->i.ata_ioadl.regs;
5892 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5893 } else
5894 regs = &ioarcb->u.add_data.u.regs;
5895
5896 memset(regs, 0, sizeof(*regs));
5897 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05005898
5899 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5900 ipr_cmd->qc = qc;
5901 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005902 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05005903 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5904 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5905 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01005906 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05005907
Wayne Boyera32c0552010-02-19 13:23:36 -08005908 if (ioa_cfg->sis64)
5909 ipr_build_ata_ioadl64(ipr_cmd, qc);
5910 else
5911 ipr_build_ata_ioadl(ipr_cmd, qc);
5912
Brian King35a39692006-09-25 12:39:20 -05005913 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5914 ipr_copy_sata_tf(regs, &qc->tf);
5915 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005916 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05005917
5918 switch (qc->tf.protocol) {
5919 case ATA_PROT_NODATA:
5920 case ATA_PROT_PIO:
5921 break;
5922
5923 case ATA_PROT_DMA:
5924 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5925 break;
5926
Tejun Heo0dc36882007-12-18 16:34:43 -05005927 case ATAPI_PROT_PIO:
5928 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05005929 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5930 break;
5931
Tejun Heo0dc36882007-12-18 16:34:43 -05005932 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05005933 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5934 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5935 break;
5936
5937 default:
5938 WARN_ON(1);
Brian King0feeed82007-03-29 12:43:43 -05005939 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05005940 }
5941
5942 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08005943
5944 ipr_send_command(ipr_cmd);
5945
Brian King35a39692006-09-25 12:39:20 -05005946 return 0;
5947}
5948
5949/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09005950 * ipr_qc_fill_rtf - Read result TF
5951 * @qc: ATA queued command
5952 *
5953 * Return value:
5954 * true
5955 **/
5956static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
5957{
5958 struct ipr_sata_port *sata_port = qc->ap->private_data;
5959 struct ipr_ioasa_gata *g = &sata_port->ioasa;
5960 struct ata_taskfile *tf = &qc->result_tf;
5961
5962 tf->feature = g->error;
5963 tf->nsect = g->nsect;
5964 tf->lbal = g->lbal;
5965 tf->lbam = g->lbam;
5966 tf->lbah = g->lbah;
5967 tf->device = g->device;
5968 tf->command = g->status;
5969 tf->hob_nsect = g->hob_nsect;
5970 tf->hob_lbal = g->hob_lbal;
5971 tf->hob_lbam = g->hob_lbam;
5972 tf->hob_lbah = g->hob_lbah;
5973 tf->ctl = g->alt_status;
5974
5975 return true;
5976}
5977
Brian King35a39692006-09-25 12:39:20 -05005978static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05005979 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09005980 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05005981 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05005982 .qc_prep = ata_noop_qc_prep,
5983 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09005984 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05005985 .port_start = ata_sas_port_start,
5986 .port_stop = ata_sas_port_stop
5987};
5988
/* Template ata_port_info used when registering SATA ports on this
 * adapter; masks follow the usual libata bit-per-mode convention. */
static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
	.pio_mask = 0x10, /* pio4 */
	.mwdma_mask = 0x07, /* mwdma0-2 */
	.udma_mask = 0x7f, /* udma0-6 */
	.port_ops = &ipr_sata_ops
};
5997
Linus Torvalds1da177e2005-04-16 15:20:36 -07005998#ifdef CONFIG_PPC_PSERIES
5999static const u16 ipr_blocked_processors[] = {
6000 PV_NORTHSTAR,
6001 PV_PULSAR,
6002 PV_POWER4,
6003 PV_ICESTAR,
6004 PV_SSTAR,
6005 PV_POWER4p,
6006 PV_630,
6007 PV_630p
6008};
6009
6010/**
6011 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6012 * @ioa_cfg: ioa cfg struct
6013 *
6014 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6015 * certain pSeries hardware. This function determines if the given
6016 * adapter is in one of these confgurations or not.
6017 *
6018 * Return value:
6019 * 1 if adapter is not supported / 0 if adapter is supported
6020 **/
6021static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6022{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023 int i;
6024
Auke Kok44c10132007-06-08 15:46:36 -07006025 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6026 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6027 if (__is_processor(ipr_blocked_processors[i]))
6028 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006029 }
6030 }
6031 return 0;
6032}
6033#else
6034#define ipr_invalid_adapter(ioa_cfg) 0
6035#endif
6036
6037/**
6038 * ipr_ioa_bringdown_done - IOA bring down completion.
6039 * @ipr_cmd: ipr command struct
6040 *
6041 * This function processes the completion of an adapter bring down.
6042 * It wakes any reset sleepers.
6043 *
6044 * Return value:
6045 * IPR_RC_JOB_RETURN
6046 **/
6047static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6048{
6049 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6050
6051 ENTER;
6052 ioa_cfg->in_reset_reload = 0;
6053 ioa_cfg->reset_retries = 0;
6054 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6055 wake_up_all(&ioa_cfg->reset_wait_q);
6056
6057 spin_unlock_irq(ioa_cfg->host->host_lock);
6058 scsi_unblock_requests(ioa_cfg->host);
6059 spin_lock_irq(ioa_cfg->host->host_lock);
6060 LEAVE;
6061
6062 return IPR_RC_JOB_RETURN;
6063}
6064
6065/**
6066 * ipr_ioa_reset_done - IOA reset completion.
6067 * @ipr_cmd: ipr command struct
6068 *
6069 * This function processes the completion of an adapter reset.
6070 * It schedules any necessary mid-layer add/removes and
6071 * wakes any reset sleepers.
6072 *
6073 * Return value:
6074 * IPR_RC_JOB_RETURN
6075 **/
6076static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6077{
6078 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6079 struct ipr_resource_entry *res;
6080 struct ipr_hostrcb *hostrcb, *temp;
6081 int i = 0;
6082
6083 ENTER;
6084 ioa_cfg->in_reset_reload = 0;
6085 ioa_cfg->allow_cmds = 1;
6086 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006087 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088
6089 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6090 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6091 ipr_trace;
6092 break;
6093 }
6094 }
6095 schedule_work(&ioa_cfg->work_q);
6096
6097 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6098 list_del(&hostrcb->queue);
6099 if (i++ < IPR_NUM_LOG_HCAMS)
6100 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6101 else
6102 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6103 }
6104
Brian King6bb04172007-04-26 16:00:08 -05006105 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006106 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6107
6108 ioa_cfg->reset_retries = 0;
6109 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6110 wake_up_all(&ioa_cfg->reset_wait_q);
6111
Mark Nelson30237852008-12-10 12:23:20 +11006112 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006113 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006114 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006115
6116 if (!ioa_cfg->allow_cmds)
6117 scsi_block_requests(ioa_cfg->host);
6118
6119 LEAVE;
6120 return IPR_RC_JOB_RETURN;
6121}
6122
6123/**
6124 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6125 * @supported_dev: supported device struct
6126 * @vpids: vendor product id struct
6127 *
6128 * Return value:
6129 * none
6130 **/
6131static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6132 struct ipr_std_inq_vpids *vpids)
6133{
6134 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6135 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6136 supported_dev->num_records = 1;
6137 supported_dev->data_length =
6138 cpu_to_be16(sizeof(struct ipr_supported_device));
6139 supported_dev->reserved = 0;
6140}
6141
6142/**
6143 * ipr_set_supported_devs - Send Set Supported Devices for a device
6144 * @ipr_cmd: ipr command struct
6145 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006146 * This function sends a Set Supported Devices to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147 *
6148 * Return value:
6149 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6150 **/
6151static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6152{
6153 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6154 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6156 struct ipr_resource_entry *res = ipr_cmd->u.res;
6157
6158 ipr_cmd->job_step = ipr_ioa_reset_done;
6159
6160 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006161 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162 continue;
6163
6164 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006165 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006166
6167 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6168 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6169 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6170
6171 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006172 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006173 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6174 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6175
Wayne Boyera32c0552010-02-19 13:23:36 -08006176 ipr_init_ioadl(ipr_cmd,
6177 ioa_cfg->vpd_cbs_dma +
6178 offsetof(struct ipr_misc_cbs, supp_dev),
6179 sizeof(struct ipr_supported_device),
6180 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181
6182 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6183 IPR_SET_SUP_DEVICE_TIMEOUT);
6184
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006185 if (!ioa_cfg->sis64)
6186 ipr_cmd->job_step = ipr_set_supported_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006187 return IPR_RC_JOB_RETURN;
6188 }
6189
6190 return IPR_RC_JOB_CONTINUE;
6191}
6192
6193/**
6194 * ipr_get_mode_page - Locate specified mode page
6195 * @mode_pages: mode page buffer
6196 * @page_code: page code to find
6197 * @len: minimum required length for mode page
6198 *
6199 * Return value:
6200 * pointer to mode page / NULL on failure
6201 **/
6202static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6203 u32 page_code, u32 len)
6204{
6205 struct ipr_mode_page_hdr *mode_hdr;
6206 u32 page_length;
6207 u32 length;
6208
6209 if (!mode_pages || (mode_pages->hdr.length == 0))
6210 return NULL;
6211
6212 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6213 mode_hdr = (struct ipr_mode_page_hdr *)
6214 (mode_pages->data + mode_pages->hdr.block_desc_len);
6215
6216 while (length) {
6217 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6218 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6219 return mode_hdr;
6220 break;
6221 } else {
6222 page_length = (sizeof(struct ipr_mode_page_hdr) +
6223 mode_hdr->page_length);
6224 length -= page_length;
6225 mode_hdr = (struct ipr_mode_page_hdr *)
6226 ((unsigned long)mode_hdr + page_length);
6227 }
6228 }
6229 return NULL;
6230}
6231
6232/**
6233 * ipr_check_term_power - Check for term power errors
6234 * @ioa_cfg: ioa config struct
6235 * @mode_pages: IOAFP mode pages buffer
6236 *
6237 * Check the IOAFP's mode page 28 for term power errors
6238 *
6239 * Return value:
6240 * nothing
6241 **/
6242static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6243 struct ipr_mode_pages *mode_pages)
6244{
6245 int i;
6246 int entry_length;
6247 struct ipr_dev_bus_entry *bus;
6248 struct ipr_mode_page28 *mode_page;
6249
6250 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6251 sizeof(struct ipr_mode_page28));
6252
6253 entry_length = mode_page->entry_length;
6254
6255 bus = mode_page->bus;
6256
6257 for (i = 0; i < mode_page->num_entries; i++) {
6258 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6259 dev_err(&ioa_cfg->pdev->dev,
6260 "Term power is absent on scsi bus %d\n",
6261 bus->res_addr.bus);
6262 }
6263
6264 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6265 }
6266}
6267
6268/**
6269 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6270 * @ioa_cfg: ioa config struct
6271 *
6272 * Looks through the config table checking for SES devices. If
6273 * the SES device is in the SES table indicating a maximum SCSI
6274 * bus speed, the speed is limited for the bus.
6275 *
6276 * Return value:
6277 * none
6278 **/
6279static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6280{
6281 u32 max_xfer_rate;
6282 int i;
6283
6284 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6285 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6286 ioa_cfg->bus_attr[i].bus_width);
6287
6288 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6289 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6290 }
6291}
6292
6293/**
6294 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6295 * @ioa_cfg: ioa config struct
6296 * @mode_pages: mode page 28 buffer
6297 *
6298 * Updates mode page 28 based on driver configuration
6299 *
6300 * Return value:
6301 * none
6302 **/
6303static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6304 struct ipr_mode_pages *mode_pages)
6305{
6306 int i, entry_length;
6307 struct ipr_dev_bus_entry *bus;
6308 struct ipr_bus_attributes *bus_attr;
6309 struct ipr_mode_page28 *mode_page;
6310
6311 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6312 sizeof(struct ipr_mode_page28));
6313
6314 entry_length = mode_page->entry_length;
6315
6316 /* Loop for each device bus entry */
6317 for (i = 0, bus = mode_page->bus;
6318 i < mode_page->num_entries;
6319 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6320 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6321 dev_err(&ioa_cfg->pdev->dev,
6322 "Invalid resource address reported: 0x%08X\n",
6323 IPR_GET_PHYS_LOC(bus->res_addr));
6324 continue;
6325 }
6326
6327 bus_attr = &ioa_cfg->bus_attr[i];
6328 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6329 bus->bus_width = bus_attr->bus_width;
6330 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6331 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6332 if (bus_attr->qas_enabled)
6333 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6334 else
6335 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6336 }
6337}
6338
6339/**
6340 * ipr_build_mode_select - Build a mode select command
6341 * @ipr_cmd: ipr command struct
6342 * @res_handle: resource handle to send command to
6343 * @parm: Byte 2 of Mode Sense command
6344 * @dma_addr: DMA buffer address
6345 * @xfer_len: data transfer length
6346 *
6347 * Return value:
6348 * none
6349 **/
6350static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08006351 __be32 res_handle, u8 parm,
6352 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006353{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006354 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6355
6356 ioarcb->res_handle = res_handle;
6357 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6358 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6359 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6360 ioarcb->cmd_pkt.cdb[1] = parm;
6361 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6362
Wayne Boyera32c0552010-02-19 13:23:36 -08006363 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006364}
6365
6366/**
6367 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6368 * @ipr_cmd: ipr command struct
6369 *
6370 * This function sets up the SCSI bus attributes and sends
6371 * a Mode Select for Page 28 to activate them.
6372 *
6373 * Return value:
6374 * IPR_RC_JOB_RETURN
6375 **/
6376static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6377{
6378 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6379 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6380 int length;
6381
6382 ENTER;
Brian King47338042006-02-08 20:57:42 -06006383 ipr_scsi_bus_speed_limit(ioa_cfg);
6384 ipr_check_term_power(ioa_cfg, mode_pages);
6385 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6386 length = mode_pages->hdr.length + 1;
6387 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006388
6389 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6390 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6391 length);
6392
Wayne Boyerf72919e2010-02-19 13:24:21 -08006393 ipr_cmd->job_step = ipr_set_supported_devs;
6394 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6395 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6397
6398 LEAVE;
6399 return IPR_RC_JOB_RETURN;
6400}
6401
6402/**
6403 * ipr_build_mode_sense - Builds a mode sense command
6404 * @ipr_cmd: ipr command struct
6405 * @res: resource entry struct
6406 * @parm: Byte 2 of mode sense command
6407 * @dma_addr: DMA address of mode sense buffer
6408 * @xfer_len: Size of DMA buffer
6409 *
6410 * Return value:
6411 * none
6412 **/
6413static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6414 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08006415 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006417 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6418
6419 ioarcb->res_handle = res_handle;
6420 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6421 ioarcb->cmd_pkt.cdb[2] = parm;
6422 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6423 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6424
Wayne Boyera32c0552010-02-19 13:23:36 -08006425 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006426}
6427
6428/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006429 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6430 * @ipr_cmd: ipr command struct
6431 *
6432 * This function handles the failure of an IOA bringup command.
6433 *
6434 * Return value:
6435 * IPR_RC_JOB_RETURN
6436 **/
6437static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6438{
6439 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6440 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6441
6442 dev_err(&ioa_cfg->pdev->dev,
6443 "0x%02X failed with IOASC: 0x%08X\n",
6444 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6445
6446 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6447 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6448 return IPR_RC_JOB_RETURN;
6449}
6450
6451/**
6452 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6453 * @ipr_cmd: ipr command struct
6454 *
6455 * This function handles the failure of a Mode Sense to the IOAFP.
6456 * Some adapters do not handle all mode pages.
6457 *
6458 * Return value:
6459 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6460 **/
6461static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6462{
Wayne Boyerf72919e2010-02-19 13:24:21 -08006463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006464 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6465
6466 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08006467 ipr_cmd->job_step = ipr_set_supported_devs;
6468 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6469 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006470 return IPR_RC_JOB_CONTINUE;
6471 }
6472
6473 return ipr_reset_cmd_failed(ipr_cmd);
6474}
6475
6476/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006477 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6478 * @ipr_cmd: ipr command struct
6479 *
6480 * This function send a Page 28 mode sense to the IOA to
6481 * retrieve SCSI bus attributes.
6482 *
6483 * Return value:
6484 * IPR_RC_JOB_RETURN
6485 **/
6486static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6487{
6488 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6489
6490 ENTER;
6491 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6492 0x28, ioa_cfg->vpd_cbs_dma +
6493 offsetof(struct ipr_misc_cbs, mode_pages),
6494 sizeof(struct ipr_mode_pages));
6495
6496 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006497 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498
6499 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6500
6501 LEAVE;
6502 return IPR_RC_JOB_RETURN;
6503}
6504
6505/**
Brian Kingac09c342007-04-26 16:00:16 -05006506 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6507 * @ipr_cmd: ipr command struct
6508 *
6509 * This function enables dual IOA RAID support if possible.
6510 *
6511 * Return value:
6512 * IPR_RC_JOB_RETURN
6513 **/
6514static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6515{
6516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6517 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6518 struct ipr_mode_page24 *mode_page;
6519 int length;
6520
6521 ENTER;
6522 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6523 sizeof(struct ipr_mode_page24));
6524
6525 if (mode_page)
6526 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6527
6528 length = mode_pages->hdr.length + 1;
6529 mode_pages->hdr.length = 0;
6530
6531 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6532 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6533 length);
6534
6535 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6536 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6537
6538 LEAVE;
6539 return IPR_RC_JOB_RETURN;
6540}
6541
6542/**
6543 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6544 * @ipr_cmd: ipr command struct
6545 *
6546 * This function handles the failure of a Mode Sense to the IOAFP.
6547 * Some adapters do not handle all mode pages.
6548 *
6549 * Return value:
6550 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6551 **/
6552static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6553{
6554 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6555
6556 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6557 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6558 return IPR_RC_JOB_CONTINUE;
6559 }
6560
6561 return ipr_reset_cmd_failed(ipr_cmd);
6562}
6563
6564/**
6565 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6566 * @ipr_cmd: ipr command struct
6567 *
6568 * This function send a mode sense to the IOA to retrieve
6569 * the IOA Advanced Function Control mode page.
6570 *
6571 * Return value:
6572 * IPR_RC_JOB_RETURN
6573 **/
6574static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6575{
6576 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6577
6578 ENTER;
6579 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6580 0x24, ioa_cfg->vpd_cbs_dma +
6581 offsetof(struct ipr_misc_cbs, mode_pages),
6582 sizeof(struct ipr_mode_pages));
6583
6584 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6585 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6586
6587 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6588
6589 LEAVE;
6590 return IPR_RC_JOB_RETURN;
6591}
6592
6593/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006594 * ipr_init_res_table - Initialize the resource table
6595 * @ipr_cmd: ipr command struct
6596 *
6597 * This function looks through the existing resource table, comparing
6598 * it with the config table. This function will take care of old/new
6599 * devices and schedule adding/removing them from the mid-layer
6600 * as appropriate.
6601 *
6602 * Return value:
6603 * IPR_RC_JOB_CONTINUE
6604 **/
6605static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6606{
6607 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6608 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006609 struct ipr_config_table_entry_wrapper cfgtew;
6610 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006611 LIST_HEAD(old_res);
6612
6613 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006614 if (ioa_cfg->sis64)
6615 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6616 else
6617 flag = ioa_cfg->u.cfg_table->hdr.flags;
6618
6619 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006620 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6621
6622 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6623 list_move_tail(&res->queue, &old_res);
6624
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006625 if (ioa_cfg->sis64)
6626 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6627 else
6628 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6629
6630 for (i = 0; i < entries; i++) {
6631 if (ioa_cfg->sis64)
6632 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6633 else
6634 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006635 found = 0;
6636
6637 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006638 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006639 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6640 found = 1;
6641 break;
6642 }
6643 }
6644
6645 if (!found) {
6646 if (list_empty(&ioa_cfg->free_res_q)) {
6647 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6648 break;
6649 }
6650
6651 found = 1;
6652 res = list_entry(ioa_cfg->free_res_q.next,
6653 struct ipr_resource_entry, queue);
6654 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006655 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006656 res->add_to_ml = 1;
6657 }
6658
6659 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006660 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661 }
6662
6663 list_for_each_entry_safe(res, temp, &old_res, queue) {
6664 if (res->sdev) {
6665 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006666 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006668 }
6669 }
6670
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006671 list_for_each_entry_safe(res, temp, &old_res, queue) {
6672 ipr_clear_res_target(res);
6673 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6674 }
6675
Brian Kingac09c342007-04-26 16:00:16 -05006676 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6677 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6678 else
6679 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680
6681 LEAVE;
6682 return IPR_RC_JOB_CONTINUE;
6683}
6684
6685/**
6686 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6687 * @ipr_cmd: ipr command struct
6688 *
6689 * This function sends a Query IOA Configuration command
6690 * to the adapter to retrieve the IOA configuration table.
6691 *
6692 * Return value:
6693 * IPR_RC_JOB_RETURN
6694 **/
6695static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6696{
6697 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6698 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05006700 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006701
6702 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05006703 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6704 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6706 ucode_vpd->major_release, ucode_vpd->card_type,
6707 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6708 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6709 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6710
6711 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006712 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6713 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006714
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006715 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08006716 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006717
6718 ipr_cmd->job_step = ipr_init_res_table;
6719
6720 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6721
6722 LEAVE;
6723 return IPR_RC_JOB_RETURN;
6724}
6725
6726/**
6727 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6728 * @ipr_cmd: ipr command struct
6729 *
6730 * This utility function sends an inquiry to the adapter.
6731 *
6732 * Return value:
6733 * none
6734 **/
6735static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08006736 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737{
6738 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006739
6740 ENTER;
6741 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6742 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6743
6744 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6745 ioarcb->cmd_pkt.cdb[1] = flags;
6746 ioarcb->cmd_pkt.cdb[2] = page;
6747 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6748
Wayne Boyera32c0552010-02-19 13:23:36 -08006749 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750
6751 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6752 LEAVE;
6753}
6754
6755/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006756 * ipr_inquiry_page_supported - Is the given inquiry page supported
6757 * @page0: inquiry page 0 buffer
6758 * @page: page code.
6759 *
6760 * This function determines if the specified inquiry page is supported.
6761 *
6762 * Return value:
6763 * 1 if page is supported / 0 if not
6764 **/
6765static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6766{
6767 int i;
6768
6769 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6770 if (page0->page[i] == page)
6771 return 1;
6772
6773 return 0;
6774}
6775
6776/**
Brian Kingac09c342007-04-26 16:00:16 -05006777 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6778 * @ipr_cmd: ipr command struct
6779 *
6780 * This function sends a Page 0xD0 inquiry to the adapter
6781 * to retrieve adapter capabilities.
6782 *
6783 * Return value:
6784 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6785 **/
6786static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6787{
6788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6789 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6790 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6791
6792 ENTER;
6793 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6794 memset(cap, 0, sizeof(*cap));
6795
6796 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6797 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6798 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6799 sizeof(struct ipr_inquiry_cap));
6800 return IPR_RC_JOB_RETURN;
6801 }
6802
6803 LEAVE;
6804 return IPR_RC_JOB_CONTINUE;
6805}
6806
6807/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6809 * @ipr_cmd: ipr command struct
6810 *
6811 * This function sends a Page 3 inquiry to the adapter
6812 * to retrieve software VPD information.
6813 *
6814 * Return value:
6815 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6816 **/
6817static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6818{
6819 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006820
6821 ENTER;
6822
Brian Kingac09c342007-04-26 16:00:16 -05006823 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006824
6825 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6826 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6827 sizeof(struct ipr_inquiry_page3));
6828
6829 LEAVE;
6830 return IPR_RC_JOB_RETURN;
6831}
6832
6833/**
6834 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6835 * @ipr_cmd: ipr command struct
6836 *
6837 * This function sends a Page 0 inquiry to the adapter
6838 * to retrieve supported inquiry pages.
6839 *
6840 * Return value:
6841 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6842 **/
6843static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6844{
6845 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846 char type[5];
6847
6848 ENTER;
6849
6850 /* Grab the type out of the VPD and store it away */
6851 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6852 type[4] = '\0';
6853 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6854
brking@us.ibm.com62275042005-11-01 17:01:14 -06006855 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856
brking@us.ibm.com62275042005-11-01 17:01:14 -06006857 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6858 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6859 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006860
6861 LEAVE;
6862 return IPR_RC_JOB_RETURN;
6863}
6864
6865/**
6866 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6867 * @ipr_cmd: ipr command struct
6868 *
6869 * This function sends a standard inquiry to the adapter.
6870 *
6871 * Return value:
6872 * IPR_RC_JOB_RETURN
6873 **/
6874static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6875{
6876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6877
6878 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006879 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006880
6881 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6882 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6883 sizeof(struct ipr_ioa_vpd));
6884
6885 LEAVE;
6886 return IPR_RC_JOB_RETURN;
6887}
6888
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd: ipr command struct
 *
 * This function send an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * (Note: the function name is a historical misspelling of "identify";
 * it is kept as-is because other code references it by this name.)
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	/* CDB bytes 2-5: host RRQ DMA address, packed most-significant
	 * byte first (big-endian, as the adapter expects). */
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	/* CDB bytes 7-8: queue size in bytes (one u32 entry per command
	 * block), again most-significant byte first. */
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	/* Once the HRRQ is established, move on to the inquiry chain */
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
6931
6932/**
6933 * ipr_reset_timer_done - Adapter reset timer function
6934 * @ipr_cmd: ipr command struct
6935 *
6936 * Description: This function is used in adapter reset processing
6937 * for timing events. If the reset_cmd pointer in the IOA
6938 * config struct is not this adapter's we are doing nested
6939 * resets and fail_all_ops will take care of freeing the
6940 * command block.
6941 *
6942 * Return value:
6943 * none
6944 **/
6945static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
6946{
6947 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6948 unsigned long lock_flags = 0;
6949
6950 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6951
6952 if (ioa_cfg->reset_cmd == ipr_cmd) {
6953 list_del(&ipr_cmd->queue);
6954 ipr_cmd->done(ipr_cmd);
6955 }
6956
6957 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6958}
6959
6960/**
6961 * ipr_reset_start_timer - Start a timer for adapter reset job
6962 * @ipr_cmd: ipr command struct
6963 * @timeout: timeout value
6964 *
6965 * Description: This function is used in adapter reset processing
6966 * for timing events. If the reset_cmd pointer in the IOA
6967 * config struct is not this adapter's we are doing nested
6968 * resets and fail_all_ops will take care of freeing the
6969 * command block.
6970 *
6971 * Return value:
6972 * none
6973 **/
6974static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
6975 unsigned long timeout)
6976{
6977 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
6978 ipr_cmd->done = ipr_reset_ioa_job;
6979
6980 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6981 ipr_cmd->timer.expires = jiffies + timeout;
6982 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
6983 add_timer(&ipr_cmd->timer);
6984}
6985
6986/**
6987 * ipr_init_ioa_mem - Initialize ioa_cfg control block
6988 * @ioa_cfg: ioa cfg struct
6989 *
6990 * Return value:
6991 * nothing
6992 **/
6993static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6994{
6995 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
6996
6997 /* Initialize Host RRQ pointers */
6998 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
6999 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7000 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7001 ioa_cfg->toggle_bit = 1;
7002
7003 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007004 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005}
7006
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd: ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	/* Next step after the adapter becomes operational */
	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	/* If the adapter is already transitioning to operational, just
	 * unmask error/HRRQ interrupts and continue the job inline. */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		/* Read back to flush the posted write before proceeding */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	/* Read back to flush the posted writes above */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* Arm the transition-to-operational timeout and park the command
	 * on the pending queue until the adapter signals operational. */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7054
7055/**
7056 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7057 * @ipr_cmd: ipr command struct
7058 *
7059 * This function is invoked when an adapter dump has run out
7060 * of processing time.
7061 *
7062 * Return value:
7063 * IPR_RC_JOB_CONTINUE
7064 **/
7065static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7066{
7067 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7068
7069 if (ioa_cfg->sdt_state == GET_DUMP)
7070 ioa_cfg->sdt_state = ABORT_DUMP;
7071
7072 ipr_cmd->job_step = ipr_reset_alert;
7073
7074 return IPR_RC_JOB_CONTINUE;
7075}
7076
7077/**
7078 * ipr_unit_check_no_data - Log a unit check/no data error log
7079 * @ioa_cfg: ioa config struct
7080 *
7081 * Logs an error indicating the adapter unit checked, but for some
7082 * reason, we were unable to fetch the unit check buffer.
7083 *
7084 * Return value:
7085 * nothing
7086 **/
7087static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7088{
7089 ioa_cfg->errors_logged++;
7090 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7091}
7092
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg: ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	/* On non-SIS64 adapters the mailbox must hold a valid format 2
	 * SDT address; otherwise there is nothing we can fetch. */
	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Pull the SDT header + first entry out of adapter memory */
	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	/* Bail out unless the fetch worked, entry 0 is valid, and the
	 * table is in a ready-to-use state (format 2 or format 3). */
	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		/* Format 3: end_token is the length directly */
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		/* Format 2: length is end - start, masked to the
		 * mailbox address width */
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			 IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a free host RCB to hold the fetched error data */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	/* Clock the UC buffer itself into the hostrcb, clamped to the
	 * hcam buffer size */
	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		/* If the error demands another reset while a dump was
		 * pending, defer the dump until that reset completes */
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	/* Return the hostrcb to the free pool in every case */
	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
7158
7159/**
7160 * ipr_reset_restore_cfg_space - Restore PCI config space.
7161 * @ipr_cmd: ipr command struct
7162 *
7163 * Description: This function restores the saved PCI config space of
7164 * the adapter, fails all outstanding ops back to the callers, and
7165 * fetches the dump/unit check if applicable to this reset.
7166 *
7167 * Return value:
7168 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7169 **/
7170static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7171{
7172 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7173 int rc;
7174
7175 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02007176 ioa_cfg->pdev->state_saved = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007177 rc = pci_restore_state(ioa_cfg->pdev);
7178
7179 if (rc != PCIBIOS_SUCCESSFUL) {
7180 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7181 return IPR_RC_JOB_CONTINUE;
7182 }
7183
7184 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7185 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7186 return IPR_RC_JOB_CONTINUE;
7187 }
7188
7189 ipr_fail_all_ops(ioa_cfg);
7190
7191 if (ioa_cfg->ioa_unit_checked) {
7192 ioa_cfg->ioa_unit_checked = 0;
7193 ipr_get_unit_check_buffer(ioa_cfg);
7194 ipr_cmd->job_step = ipr_reset_alert;
7195 ipr_reset_start_timer(ipr_cmd, 0);
7196 return IPR_RC_JOB_RETURN;
7197 }
7198
7199 if (ioa_cfg->in_ioa_bringdown) {
7200 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7201 } else {
7202 ipr_cmd->job_step = ipr_reset_enable_ioa;
7203
7204 if (GET_DUMP == ioa_cfg->sdt_state) {
7205 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7206 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7207 schedule_work(&ioa_cfg->work_q);
7208 return IPR_RC_JOB_RETURN;
7209 }
7210 }
7211
7212 ENTER;
7213 return IPR_RC_JOB_CONTINUE;
7214}
7215
7216/**
Brian Kinge619e1a2007-01-23 11:25:37 -06007217 * ipr_reset_bist_done - BIST has completed on the adapter.
7218 * @ipr_cmd: ipr command struct
7219 *
7220 * Description: Unblock config space and resume the reset process.
7221 *
7222 * Return value:
7223 * IPR_RC_JOB_CONTINUE
7224 **/
7225static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7226{
7227 ENTER;
7228 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7229 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7230 LEAVE;
7231 return IPR_RC_JOB_CONTINUE;
7232}
7233
7234/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007235 * ipr_reset_start_bist - Run BIST on the adapter.
7236 * @ipr_cmd: ipr command struct
7237 *
7238 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7239 *
7240 * Return value:
7241 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7242 **/
7243static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7244{
7245 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7246 int rc;
7247
7248 ENTER;
Brian Kingb30197d2005-09-27 01:21:56 -07007249 pci_block_user_cfg_access(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007250 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7251
7252 if (rc != PCIBIOS_SUCCESSFUL) {
Brian Kinga9aedb02007-03-29 12:43:23 -05007253 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7255 rc = IPR_RC_JOB_CONTINUE;
7256 } else {
Brian Kinge619e1a2007-01-23 11:25:37 -06007257 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7259 rc = IPR_RC_JOB_RETURN;
7260 }
7261
7262 LEAVE;
7263 return rc;
7264}
7265
7266/**
Brian King463fc692007-05-07 17:09:05 -05007267 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7268 * @ipr_cmd: ipr command struct
7269 *
7270 * Description: This clears PCI reset to the adapter and delays two seconds.
7271 *
7272 * Return value:
7273 * IPR_RC_JOB_RETURN
7274 **/
7275static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7276{
7277 ENTER;
7278 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7279 ipr_cmd->job_step = ipr_reset_bist_done;
7280 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7281 LEAVE;
7282 return IPR_RC_JOB_RETURN;
7283}
7284
7285/**
7286 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7287 * @ipr_cmd: ipr command struct
7288 *
7289 * Description: This asserts PCI reset to the adapter.
7290 *
7291 * Return value:
7292 * IPR_RC_JOB_RETURN
7293 **/
7294static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7295{
7296 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7297 struct pci_dev *pdev = ioa_cfg->pdev;
7298
7299 ENTER;
7300 pci_block_user_cfg_access(pdev);
7301 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7302 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7303 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7304 LEAVE;
7305 return IPR_RC_JOB_RETURN;
7306}
7307
7308/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309 * ipr_reset_allowed - Query whether or not IOA can be reset
7310 * @ioa_cfg: ioa config struct
7311 *
7312 * Return value:
7313 * 0 if reset not allowed / non-zero if reset is allowed
7314 **/
7315static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7316{
7317 volatile u32 temp_reg;
7318
7319 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7320 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7321}
7322
7323/**
7324 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7325 * @ipr_cmd: ipr command struct
7326 *
7327 * Description: This function waits for adapter permission to run BIST,
7328 * then runs BIST. If the adapter does not give permission after a
7329 * reasonable time, we will reset the adapter anyway. The impact of
7330 * resetting the adapter without warning the adapter is the risk of
7331 * losing the persistent error log on the adapter. If the adapter is
7332 * reset while it is writing to the flash on the adapter, the flash
7333 * segment will have bad ECC and be zeroed.
7334 *
7335 * Return value:
7336 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7337 **/
7338static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7339{
7340 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7341 int rc = IPR_RC_JOB_RETURN;
7342
7343 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7344 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7345 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7346 } else {
Brian King463fc692007-05-07 17:09:05 -05007347 ipr_cmd->job_step = ioa_cfg->reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007348 rc = IPR_RC_JOB_CONTINUE;
7349 }
7350
7351 return rc;
7352}
7353
/**
 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
 * @ipr_cmd: ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		/* Memory space is enabled: mask interrupts, raise the
		 * reset alert doorbell, and poll for permission. */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		/* Cannot reach MMIO space: skip the alert and go straight
		 * to the configured reset method. */
		ipr_cmd->job_step = ioa_cfg->reset;
	}

	/* Always defer via the timer so the next step never runs from
	 * interrupt context (see description above). */
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7389
7390/**
7391 * ipr_reset_ucode_download_done - Microcode download completion
7392 * @ipr_cmd: ipr command struct
7393 *
7394 * Description: This function unmaps the microcode download buffer.
7395 *
7396 * Return value:
7397 * IPR_RC_JOB_CONTINUE
7398 **/
7399static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7400{
7401 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7402 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7403
7404 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7405 sglist->num_sg, DMA_TO_DEVICE);
7406
7407 ipr_cmd->job_step = ipr_reset_alert;
7408 return IPR_RC_JOB_CONTINUE;
7409}
7410
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function checks to see if it there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	/* No staged microcode image: nothing to do, continue inline */
	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	/* Build a SCSI WRITE BUFFER (download-and-save mode) addressed
	 * to the IOA; CDB bytes 6-8 carry the 24-bit image length,
	 * most-significant byte first. */
	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	/* Scatter/gather list format depends on the adapter generation */
	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
7452
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd: ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	/* Only issue the shutdown if one was requested and the adapter
	 * is still alive; otherwise jump straight to the reset alert. */
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		/* Pick the timeout by shutdown flavor; the order of these
		 * tests matters (PREPARE_FOR_NORMAL must be checked before
		 * the dual-IOA abbreviated case). */
		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		/* After shutdown completes, try a microcode download */
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
7497
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd: ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 * It repeatedly invokes the current job step while steps complete inline
 * (IPR_RC_JOB_CONTINUE) and returns as soon as a step defers itself
 * (IPR_RC_JOB_RETURN, e.g. waiting on a command or timer).
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		/* The previous step's command failed: give the failure
		 * handler a chance to recover or redirect the job. */
		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		/* Recycle the command block and reset the failure handler
		 * to the default before running the next step. */
		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while(rc == IPR_RC_JOB_CONTINUE);
}
7535
7536/**
7537 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7538 * @ioa_cfg: ioa config struct
7539 * @job_step: first job step of reset job
7540 * @shutdown_type: shutdown type
7541 *
7542 * Description: This function will initiate the reset of the given adapter
7543 * starting at the selected job step.
7544 * If the caller needs to wait on the completion of the reset,
7545 * the caller must sleep on the reset_wait_q.
7546 *
7547 * Return value:
7548 * none
7549 **/
7550static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7551 int (*job_step) (struct ipr_cmnd *),
7552 enum ipr_shutdown_type shutdown_type)
7553{
7554 struct ipr_cmnd *ipr_cmd;
7555
7556 ioa_cfg->in_reset_reload = 1;
7557 ioa_cfg->allow_cmds = 0;
7558 scsi_block_requests(ioa_cfg->host);
7559
7560 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7561 ioa_cfg->reset_cmd = ipr_cmd;
7562 ipr_cmd->job_step = job_step;
7563 ipr_cmd->u.shutdown_type = shutdown_type;
7564
7565 ipr_reset_ioa_job(ipr_cmd);
7566}
7567
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg: ioa config struct
 * @shutdown_type: shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Must be called with the host_lock held (it is briefly dropped around
 * scsi_unblock_requests() on the offline path).
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	/* A new reset supersedes any in-progress dump request */
	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	/* Too many consecutive reset attempts: give up on the adapter */
	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			/* Already tearing down: fail everything back and
			 * wake any waiters; drop the host lock only for
			 * scsi_unblock_requests(). */
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			/* Convert this reset into a bringdown with no
			 * adapter shutdown command. */
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
7615
7616/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007617 * ipr_reset_freeze - Hold off all I/O activity
7618 * @ipr_cmd: ipr command struct
7619 *
7620 * Description: If the PCI slot is frozen, hold off all I/O
7621 * activity; then, as soon as the slot is available again,
7622 * initiate an adapter reset.
7623 */
7624static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7625{
7626 /* Disallow new interrupts, avoid loop */
7627 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7628 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7629 ipr_cmd->done = ipr_reset_ioa_job;
7630 return IPR_RC_JOB_RETURN;
7631}
7632
7633/**
7634 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7635 * @pdev: PCI device struct
7636 *
7637 * Description: This routine is called to tell us that the PCI bus
7638 * is down. Can't do anything here, except put the device driver
7639 * into a holding pattern, waiting for the PCI bus to come back.
7640 */
7641static void ipr_pci_frozen(struct pci_dev *pdev)
7642{
7643 unsigned long flags = 0;
7644 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7645
7646 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7647 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7649}
7650
7651/**
7652 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7653 * @pdev: PCI device struct
7654 *
7655 * Description: This routine is called by the pci error recovery
7656 * code after the PCI slot has been reset, just before we
7657 * should resume normal operations.
7658 */
7659static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7660{
7661 unsigned long flags = 0;
7662 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7663
7664 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05007665 if (ioa_cfg->needs_warm_reset)
7666 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7667 else
7668 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7669 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007670 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7671 return PCI_ERS_RESULT_RECOVERED;
7672}
7673
7674/**
7675 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7676 * @pdev: PCI device struct
7677 *
7678 * Description: This routine is called when the PCI bus has
7679 * permanently failed.
7680 */
7681static void ipr_pci_perm_failure(struct pci_dev *pdev)
7682{
7683 unsigned long flags = 0;
7684 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7685
7686 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7687 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7688 ioa_cfg->sdt_state = ABORT_DUMP;
7689 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7690 ioa_cfg->in_ioa_bringdown = 1;
Kleber S. Souza6ff63892009-05-04 10:41:02 -03007691 ioa_cfg->allow_cmds = 0;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007692 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7693 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7694}
7695
7696/**
7697 * ipr_pci_error_detected - Called when a PCI error is detected.
7698 * @pdev: PCI device struct
7699 * @state: PCI channel state
7700 *
7701 * Description: Called when a PCI error is detected.
7702 *
7703 * Return value:
7704 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7705 */
7706static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7707 pci_channel_state_t state)
7708{
7709 switch (state) {
7710 case pci_channel_io_frozen:
7711 ipr_pci_frozen(pdev);
7712 return PCI_ERS_RESULT_NEED_RESET;
7713 case pci_channel_io_perm_failure:
7714 ipr_pci_perm_failure(pdev);
7715 return PCI_ERS_RESULT_DISCONNECT;
7716 break;
7717 default:
7718 break;
7719 }
7720 return PCI_ERS_RESULT_NEED_RESET;
7721}
7722
7723/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007724 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7725 * @ioa_cfg: ioa cfg struct
7726 *
 * Description: This is the second phase of adapter initialization
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
7731 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02007732 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07007733 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	/* If probe-time register state flagged the card as being in an
	 * unknown state, do a full hard reset; otherwise just enable it */
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	/* Drop the lock and sleep until the reset/reload job completes */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		/* ipr_testmode lets unsupported configs continue for testing */
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
7768
7769/**
7770 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7771 * @ioa_cfg: ioa config struct
7772 *
7773 * Return value:
7774 * none
7775 **/
7776static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7777{
7778 int i;
7779
7780 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7781 if (ioa_cfg->ipr_cmnd_list[i])
7782 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7783 ioa_cfg->ipr_cmnd_list[i],
7784 ioa_cfg->ipr_cmnd_list_dma[i]);
7785
7786 ioa_cfg->ipr_cmnd_list[i] = NULL;
7787 }
7788
7789 if (ioa_cfg->ipr_cmd_pool)
7790 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7791
7792 ioa_cfg->ipr_cmd_pool = NULL;
7793}
7794
7795/**
7796 * ipr_free_mem - Frees memory allocated for an adapter
7797 * @ioa_cfg: ioa cfg struct
7798 *
7799 * Return value:
7800 * nothing
7801 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	/* Releases everything ipr_alloc_mem() set up: resource table,
	 * misc control blocks, command blocks, HRRQ, config table,
	 * HCAM buffers, dump area and trace buffer. */
	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	/* cfg_table_size was computed at probe time based on sis64 format */
	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
7826
7827/**
7828 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
7830 *
7831 * This function frees all allocated resources for the
7832 * specified adapter.
7833 *
7834 * Return value:
7835 * none
7836 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	/* Teardown mirrors ipr_probe_ioa() in reverse: release the IRQ
	 * first so the handler cannot run while resources disappear. */
	free_irq(pdev->irq, ioa_cfg);
	/* No-op if MSI was never enabled for this adapter */
	pci_disable_msi(pdev);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	/* Drops the last reference; ioa_cfg lives in host->hostdata, so it
	 * is invalid after this call. */
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
7851
7852/**
7853 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7854 * @ioa_cfg: ioa config struct
7855 *
7856 * Return value:
7857 * 0 on success / -ENOMEM on allocation failure
7858 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	/* DMA pool of command blocks, 16-byte aligned for the adapter */
	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
						 sizeof(struct ipr_cmnd), 16, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			/* Releases any blocks allocated so far plus the pool */
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		/* SIS64 chips take a 64-bit IOARCB bus address, SIS32 a 32-bit one */
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		/* Point the adapter at the IOADL (scatter list) and IOASA
		 * (status area) embedded within this same command block */
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		/* All blocks start out on the free queue */
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
7915
7916/**
7917 * ipr_alloc_mem - Allocate memory for an adapter
7918 * @ioa_cfg: ioa config struct
7919 *
7920 * Return value:
7921 * 0 on success / non-zero for error
7922 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	/* Per-device resource entries; count was fixed at probe time */
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	/* SIS64 tracks target/array/vset ids in bitmaps.
	 * NOTE(review): these three kzallocs are not checked for failure
	 * here — presumably callers tolerate NULL maps; confirm. */
	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
	}

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	/* Miscellaneous control blocks (VPD etc.), DMA-coherent */
	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	/* Host Request/Response Queue: one u32 slot per command block */
	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	/* Config table sized at probe time for the 32- or 64-bit format */
	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		/* Adapter only sees the hcam portion of the host RCB */
		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

	/* Unwind in strict reverse allocation order. On HCAM failure,
	 * i indexes the first buffer that failed, so free [0, i). */
out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
8019
8020/**
8021 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8022 * @ioa_cfg: ioa config struct
8023 *
8024 * Return value:
8025 * none
8026 **/
8027static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8028{
8029 int i;
8030
8031 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8032 ioa_cfg->bus_attr[i].bus = i;
8033 ioa_cfg->bus_attr[i].qas_enabled = 0;
8034 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8035 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8036 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8037 else
8038 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8039 }
8040}
8041
8042/**
8043 * ipr_init_ioa_cfg - Initialize IOA config struct
8044 * @ioa_cfg: ioa config struct
8045 * @host: scsi host struct
8046 * @pdev: PCI dev struct
8047 *
8048 * Return value:
8049 * none
8050 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	/* Eye-catcher labels make driver structures easy to locate in dumps */
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	/* Start from the module parameter, then clamp per chip family below */
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	/* Translate the chip's register offset table into absolute
	 * ioremapped addresses for fast access at interrupt time */
	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;

	/* Dump registers only exist on SIS64 hardware */
	if (ioa_cfg->sis64) {
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
	}
}
8120
8121/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008122 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07008123 * @dev_id: PCI device id struct
8124 *
8125 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008126 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 **/
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008128static const struct ipr_chip_t * __devinit
8129ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008130{
8131 int i;
8132
Linus Torvalds1da177e2005-04-16 15:20:36 -07008133 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8134 if (ipr_chip[i].vendor == dev_id->vendor &&
8135 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008136 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008137 return NULL;
8138}
8139
8140/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq: interrupt number
 * @devp: ioa config struct registered with the test IRQ
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* Record that the test interrupt arrived and wake the waiter
	 * sleeping in ipr_test_msi() */
	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
8164
8165/**
8166 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8167 * @pdev: PCI device struct
8168 *
8169 * Description: The return value from pci_enable_msi() can not always be
8170 * trusted. This routine sets up and initiates a test interrupt to determine
8171 * if the interrupt is received via the ipr_test_intr() service routine.
8172 * If the tests fails, the driver will fall back to LSI.
8173 *
8174 * Return value:
8175 * 0 on success / non-zero on failure
8176 **/
static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
				  struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	/* Unmask only the IO debug acknowledge interrupt used for the test;
	 * the readback flushes the posted write */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	/* Temporary handler that just sets msi_received and wakes us */
	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	/* Trigger the test interrupt, then wait up to 1s for delivery */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
8222
8223/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008224 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8225 * @pdev: PCI device struct
8226 * @dev_id: PCI device id struct
8227 *
8228 * Return value:
8229 * 0 on success / non-zero on failure
8230 **/
8231static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8232 const struct pci_device_id *dev_id)
8233{
8234 struct ipr_ioa_cfg *ioa_cfg;
8235 struct Scsi_Host *host;
8236 unsigned long ipr_regs_pci;
8237 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07008238 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05008239 volatile u32 mask, uproc, interrupts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008240
8241 ENTER;
8242
8243 if ((rc = pci_enable_device(pdev))) {
8244 dev_err(&pdev->dev, "Cannot enable adapter\n");
8245 goto out;
8246 }
8247
8248 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8249
8250 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8251
8252 if (!host) {
8253 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8254 rc = -ENOMEM;
8255 goto out_disable;
8256 }
8257
8258 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8259 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Brian King35a39692006-09-25 12:39:20 -05008260 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8261 sata_port_info.flags, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008262
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008263 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008264
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008265 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008266 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8267 dev_id->vendor, dev_id->device);
8268 goto out_scsi_host_put;
8269 }
8270
Wayne Boyera32c0552010-02-19 13:23:36 -08008271 /* set SIS 32 or SIS 64 */
8272 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008273 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8274
Brian King5469cb52007-03-29 12:42:40 -05008275 if (ipr_transop_timeout)
8276 ioa_cfg->transop_timeout = ipr_transop_timeout;
8277 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8278 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8279 else
8280 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8281
Auke Kok44c10132007-06-08 15:46:36 -07008282 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05008283
Linus Torvalds1da177e2005-04-16 15:20:36 -07008284 ipr_regs_pci = pci_resource_start(pdev, 0);
8285
8286 rc = pci_request_regions(pdev, IPR_NAME);
8287 if (rc < 0) {
8288 dev_err(&pdev->dev,
8289 "Couldn't register memory range of registers\n");
8290 goto out_scsi_host_put;
8291 }
8292
Arjan van de Ven25729a72008-09-28 16:18:02 -07008293 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008294
8295 if (!ipr_regs) {
8296 dev_err(&pdev->dev,
8297 "Couldn't map memory range of registers\n");
8298 rc = -ENOMEM;
8299 goto out_release_regions;
8300 }
8301
8302 ioa_cfg->hdw_dma_regs = ipr_regs;
8303 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8304 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8305
8306 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8307
8308 pci_set_master(pdev);
8309
Wayne Boyera32c0552010-02-19 13:23:36 -08008310 if (ioa_cfg->sis64) {
8311 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8312 if (rc < 0) {
8313 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8314 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8315 }
8316
8317 } else
8318 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8319
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320 if (rc < 0) {
8321 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8322 goto cleanup_nomem;
8323 }
8324
8325 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8326 ioa_cfg->chip_cfg->cache_line_size);
8327
8328 if (rc != PCIBIOS_SUCCESSFUL) {
8329 dev_err(&pdev->dev, "Write of cache line size failed\n");
8330 rc = -EIO;
8331 goto cleanup_nomem;
8332 }
8333
Wayne Boyer95fecd92009-06-16 15:13:28 -07008334 /* Enable MSI style interrupts if they are supported. */
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008335 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07008336 rc = ipr_test_msi(ioa_cfg, pdev);
8337 if (rc == -EOPNOTSUPP)
8338 pci_disable_msi(pdev);
8339 else if (rc)
8340 goto out_msi_disable;
8341 else
8342 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8343 } else if (ipr_debug)
8344 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8345
Linus Torvalds1da177e2005-04-16 15:20:36 -07008346 /* Save away PCI config space for use following IOA reset */
8347 rc = pci_save_state(pdev);
8348
8349 if (rc != PCIBIOS_SUCCESSFUL) {
8350 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8351 rc = -EIO;
8352 goto cleanup_nomem;
8353 }
8354
8355 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8356 goto cleanup_nomem;
8357
8358 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8359 goto cleanup_nomem;
8360
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008361 if (ioa_cfg->sis64)
8362 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8363 + ((sizeof(struct ipr_config_table_entry64)
8364 * ioa_cfg->max_devs_supported)));
8365 else
8366 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8367 + ((sizeof(struct ipr_config_table_entry)
8368 * ioa_cfg->max_devs_supported)));
8369
Linus Torvalds1da177e2005-04-16 15:20:36 -07008370 rc = ipr_alloc_mem(ioa_cfg);
8371 if (rc < 0) {
8372 dev_err(&pdev->dev,
8373 "Couldn't allocate enough memory for device driver!\n");
8374 goto cleanup_nomem;
8375 }
8376
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008377 /*
8378 * If HRRQ updated interrupt is not masked, or reset alert is set,
8379 * the card is in an unknown state and needs a hard reset
8380 */
8381 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
Brian King473b1e82007-05-02 10:44:11 -05008382 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008383 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
8384 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8385 ioa_cfg->needs_hard_reset = 1;
Brian King473b1e82007-05-02 10:44:11 -05008386 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8387 ioa_cfg->needs_hard_reset = 1;
8388 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8389 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008390
Linus Torvalds1da177e2005-04-16 15:20:36 -07008391 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008392 rc = request_irq(pdev->irq, ipr_isr,
8393 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8394 IPR_NAME, ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008395
8396 if (rc) {
8397 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8398 pdev->irq, rc);
8399 goto cleanup_nolog;
8400 }
8401
Brian King463fc692007-05-07 17:09:05 -05008402 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8403 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8404 ioa_cfg->needs_warm_reset = 1;
8405 ioa_cfg->reset = ipr_reset_slot_reset;
8406 } else
8407 ioa_cfg->reset = ipr_reset_start_bist;
8408
Linus Torvalds1da177e2005-04-16 15:20:36 -07008409 spin_lock(&ipr_driver_lock);
8410 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8411 spin_unlock(&ipr_driver_lock);
8412
8413 LEAVE;
8414out:
8415 return rc;
8416
8417cleanup_nolog:
8418 ipr_free_mem(ioa_cfg);
8419cleanup_nomem:
8420 iounmap(ipr_regs);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008421out_msi_disable:
8422 pci_disable_msi(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008423out_release_regions:
8424 pci_release_regions(pdev);
8425out_scsi_host_put:
8426 scsi_host_put(host);
8427out_disable:
8428 pci_disable_device(pdev);
8429 goto out;
8430}
8431
8432/**
8433 * ipr_scan_vsets - Scans for VSET devices
8434 * @ioa_cfg: ioa config struct
8435 *
8436 * Description: Since the VSET resources do not follow SAM in that we can have
8437 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8438 *
8439 * Return value:
8440 * none
8441 **/
8442static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8443{
8444 int target, lun;
8445
8446 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8447 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8448 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8449}
8450
8451/**
8452 * ipr_initiate_ioa_bringdown - Bring down an adapter
8453 * @ioa_cfg: ioa config struct
8454 * @shutdown_type: shutdown type
8455 *
8456 * Description: This function will initiate bringing down the adapter.
8457 * This consists of issuing an IOA shutdown to the adapter
8458 * to flush the cache, and running BIST.
8459 * If the caller needs to wait on the completion of the reset,
8460 * the caller must sleep on the reset_wait_q.
8461 *
8462 * Return value:
8463 * none
8464 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	/* The adapter is going away, so any dump in progress is abandoned */
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	/* No retries during bringdown; in_ioa_bringdown steers the reset
	 * job toward shutdown rather than re-initialization */
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
8476
8477/**
8478 * __ipr_remove - Remove a single adapter
8479 * @pdev: pci device struct
8480 *
8481 * Adapter hot plug remove entry point.
8482 *
8483 * Return value:
8484 * none
8485 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	/* Let any reset already in flight finish before starting the
	 * bringdown; the lock is dropped while sleeping and the flag is
	 * rechecked under it */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	/* Drain the worker thread before tearing anything down */
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	/* Remove this adapter from the global driver list */
	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	/* Frees memory, IRQ, MMIO mapping and drops the host reference */
	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
8518
8519/**
8520 * ipr_remove - IOA hot plug remove entry point
8521 * @pdev: pci device struct
8522 *
8523 * Adapter hot plug remove entry point.
8524 *
8525 * Return value:
8526 * none
8527 **/
static void __devexit ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	/* Tear down the sysfs trace/dump attributes and the SCSI host
	 * before releasing the underlying adapter resources */
	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
8544
8545/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
8548 * Return value:
8549 * 0 on success / non-zero on failure
8550 **/
8551static int __devinit ipr_probe(struct pci_dev *pdev,
8552 const struct pci_device_id *dev_id)
8553{
8554 struct ipr_ioa_cfg *ioa_cfg;
8555 int rc;
8556
8557 rc = ipr_probe_ioa(pdev, dev_id);
8558
8559 if (rc)
8560 return rc;
8561
8562 ioa_cfg = pci_get_drvdata(pdev);
8563 rc = ipr_probe_ioa_part2(ioa_cfg);
8564
8565 if (rc) {
8566 __ipr_remove(pdev);
8567 return rc;
8568 }
8569
8570 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8571
8572 if (rc) {
8573 __ipr_remove(pdev);
8574 return rc;
8575 }
8576
Tony Jonesee959b02008-02-22 00:13:36 +01008577 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008578 &ipr_trace_attr);
8579
8580 if (rc) {
8581 scsi_remove_host(ioa_cfg->host);
8582 __ipr_remove(pdev);
8583 return rc;
8584 }
8585
Tony Jonesee959b02008-02-22 00:13:36 +01008586 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008587 &ipr_dump_attr);
8588
8589 if (rc) {
Tony Jonesee959b02008-02-22 00:13:36 +01008590 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008591 &ipr_trace_attr);
8592 scsi_remove_host(ioa_cfg->host);
8593 __ipr_remove(pdev);
8594 return rc;
8595 }
8596
8597 scsi_scan_host(ioa_cfg->host);
8598 ipr_scan_vsets(ioa_cfg);
8599 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8600 ioa_cfg->allow_ml_add_del = 1;
brking@us.ibm.com11cd8f12005-11-01 17:00:11 -06008601 ioa_cfg->host->max_channel = IPR_VSET_BUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008602 schedule_work(&ioa_cfg->work_q);
8603 return 0;
8604}
8605
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev: pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* A reset/reload may already be in flight: drop the lock, sleep
	 * until it finishes, then re-take the lock and re-check in case
	 * another reset started while we slept. */
	while(ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	/* Start a normal bringdown of the IOA, then wait (without the
	 * lock held) for the resulting reset sequence to complete. */
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
8632
/*
 * PCI IDs claimed by this driver.  Entries match on vendor, device,
 * subvendor, and subdevice ID; the final (driver_data) field carries
 * per-adapter quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET.
 */
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT},
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8694
/* PCI error recovery callbacks (EEH/AER) for this driver. */
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};
8699
/* PCI driver registration: probe/remove/shutdown entry points and
 * the device ID table this driver claims. */
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
8708
8709/**
Wayne Boyerf72919e2010-02-19 13:24:21 -08008710 * ipr_halt_done - Shutdown prepare completion
8711 *
8712 * Return value:
8713 * none
8714 **/
8715static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8716{
8717 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8718
8719 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8720}
8721
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:	notifier block (unused)
 * @event:	reboot notifier event code
 * @buf:	notifier payload (unused)
 *
 * Reboot notifier callback: on restart, halt, or power-off, issue a
 * SHUTDOWN (prepare for normal) command to every registered adapter.
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	/* Only act on events that actually take the system down. */
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	/* ipr_driver_lock protects the global ipr_ioa_head adapter list. */
	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		/* Skip adapters that are not currently accepting commands. */
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		/* Build the IOA shutdown-prepare command; ipr_halt_done
		 * returns the command block to the free queue on completion. */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
8759
8760static struct notifier_block ipr_notifier = {
8761 ipr_halt, NULL, 0
8762};
8763
8764/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008765 * ipr_init - Module entry point
8766 *
8767 * Return value:
8768 * 0 on success / negative value on failure
8769 **/
8770static int __init ipr_init(void)
8771{
8772 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8773 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8774
Wayne Boyerf72919e2010-02-19 13:24:21 -08008775 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -07008776 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008777}
8778
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	/* Drop the reboot hook first, then unregister the PCI driver
	 * (which detaches and tears down all bound adapters). */
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
8792
/* Module load/unload entry points. */
module_init(ipr_init);
module_exit(ipr_exit);