/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00040,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068
		}
	},
};

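/*
 * Supported adapter chips. Each entry maps a PCI vendor/device ID pair to
 * the interrupt mode to request (IPR_USE_LSI or IPR_USE_MSI), the SIS
 * (system interface) type, and one of the register layouts defined in
 * ipr_chip_cfg[] above.
 */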
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
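/*
 * The parameters above are set at module load time, for example
 * (illustrative only):
 *
 *	modprobe ipr max_speed=1 log_level=2 dual_ioa_raid=1
 *
 * fastfail and debug are registered with S_IRUGO | S_IWUSR, so they can
 * also be changed at runtime through /sys/module/ipr/parameters/.
 */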

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
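	/* Read back the sense register so the mask/clear writes above are
	 * posted to the adapter before returning (assumed intent). */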
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

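	/* The barrier below is assumed to ensure the IOARCB and IOADL are
	 * fully written to memory before the adapter is told to fetch them. */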
	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

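	/* The caller is assumed to hold host_lock; drop it while sleeping so
	 * the interrupt handler can complete the command. */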
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch(proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}

			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
				sizeof(res->dev_lun.scsi_lun));
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buf:	buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

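	/* Builds a string such as "00-01-02": one hex byte per path element,
	 * separated by dashes, ending at the 0xff terminator or buffer limit. */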
	*p = '\0';
1145 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1146 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1147 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001148
1149 return buffer;
1150}
1151
1152/**
1153 * ipr_update_res_entry - Update the resource entry.
1154 * @res: resource entry struct
1155 * @cfgtew: config table entry wrapper struct
1156 *
1157 * Return value:
1158 * none
1159 **/
1160static void ipr_update_res_entry(struct ipr_resource_entry *res,
1161 struct ipr_config_table_entry_wrapper *cfgtew)
1162{
1163 char buffer[IPR_MAX_RES_PATH_LENGTH];
1164 unsigned int proto;
1165 int new_path = 0;
1166
1167 if (res->ioa_cfg->sis64) {
1168 res->flags = cfgtew->u.cfgte64->flags;
1169 res->res_flags = cfgtew->u.cfgte64->res_flags;
1170 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1171
1172 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1173 sizeof(struct ipr_std_inq_data));
1174
1175 res->qmodel = IPR_QUEUEING_MODEL64(res);
1176 proto = cfgtew->u.cfgte64->proto;
1177 res->res_handle = cfgtew->u.cfgte64->res_handle;
1178 res->dev_id = cfgtew->u.cfgte64->dev_id;
1179
1180 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1181 sizeof(res->dev_lun.scsi_lun));
1182
1183 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1184 sizeof(res->res_path))) {
1185 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1186 sizeof(res->res_path));
1187 new_path = 1;
1188 }
1189
1190 if (res->sdev && new_path)
1191 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001192 ipr_format_res_path(res->res_path, buffer,
1193 sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001194 } else {
1195 res->flags = cfgtew->u.cfgte->flags;
1196 if (res->flags & IPR_IS_IOA_RESOURCE)
1197 res->type = IPR_RES_TYPE_IOAFP;
1198 else
1199 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1200
1201 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1202 sizeof(struct ipr_std_inq_data));
1203
1204 res->qmodel = IPR_QUEUEING_MODEL(res);
1205 proto = cfgtew->u.cfgte->proto;
1206 res->res_handle = cfgtew->u.cfgte->res_handle;
1207 }
1208
1209 ipr_update_ata_class(res, proto);
1210}
1211
1212/**
1213 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1214 * for the resource.
1215 * @res: resource entry struct
1217 *
1218 * Return value:
1219 * none
1220 **/
1221static void ipr_clear_res_target(struct ipr_resource_entry *res)
1222{
1223 struct ipr_resource_entry *gscsi_res = NULL;
1224 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1225
1226 if (!ioa_cfg->sis64)
1227 return;
1228
1229 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1230 clear_bit(res->target, ioa_cfg->array_ids);
1231 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1232 clear_bit(res->target, ioa_cfg->vset_ids);
1233 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1234 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1235 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1236 return;
1237 clear_bit(res->target, ioa_cfg->target_ids);
1238
1239 } else if (res->bus == 0)
1240 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241}
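/*
 * Note: only SIS64 adapters allocate targets from the driver-managed
 * bitmaps, so there is nothing to clear on older adapters.  A generic
 * SCSI target id is released only when no other resource with the
 * same device id remains on the used resource queue.
 */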
1242
1243/**
1244 * ipr_handle_config_change - Handle a config change from the adapter
1245 * @ioa_cfg: ioa config struct
1246 * @hostrcb: hostrcb
1247 *
1248 * Return value:
1249 * none
1250 **/
1251static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001252 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253{
1254 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001255 struct ipr_config_table_entry_wrapper cfgtew;
1256 __be32 cc_res_handle;
1257
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 u32 is_ndn = 1;
1259
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001260 if (ioa_cfg->sis64) {
1261 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1262 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1263 } else {
1264 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1265 cc_res_handle = cfgtew.u.cfgte->res_handle;
1266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267
1268 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001269 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 is_ndn = 0;
1271 break;
1272 }
1273 }
1274
1275 if (is_ndn) {
1276 if (list_empty(&ioa_cfg->free_res_q)) {
1277 ipr_send_hcam(ioa_cfg,
1278 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1279 hostrcb);
1280 return;
1281 }
1282
1283 res = list_entry(ioa_cfg->free_res_q.next,
1284 struct ipr_resource_entry, queue);
1285
1286 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001287 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1289 }
1290
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001291 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292
1293 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1294 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001296 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 if (ioa_cfg->allow_ml_add_del)
1298 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001299 } else {
1300 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 } else if (!res->sdev) {
1304 res->add_to_ml = 1;
1305 if (ioa_cfg->allow_ml_add_del)
1306 schedule_work(&ioa_cfg->work_q);
1307 }
1308
1309 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1310}
1311
1312/**
1313 * ipr_process_ccn - Op done function for a CCN.
1314 * @ipr_cmd: ipr command struct
1315 *
1316 * This function is the op done function for a configuration
 1317 * change notification host controlled async command (HCAM) from the adapter.
1318 *
1319 * Return value:
1320 * none
1321 **/
1322static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1323{
1324 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1325 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001326 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328 list_del(&hostrcb->queue);
1329 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1330
1331 if (ioasc) {
1332 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1333 dev_err(&ioa_cfg->pdev->dev,
1334 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1335
1336 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1337 } else {
1338 ipr_handle_config_change(ioa_cfg, hostrcb);
1339 }
1340}
1341
1342/**
Brian King8cf093e2007-04-26 16:00:14 -05001343 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1344 * @i: index into buffer
1345 * @buf: string to modify
1346 *
1347 * This function will strip all trailing whitespace, pad the end
1348 * of the string with a single space, and NULL terminate the string.
1349 *
1350 * Return value:
1351 * new length of string
1352 **/
1353static int strip_and_pad_whitespace(int i, char *buf)
1354{
1355 while (i && buf[i] == ' ')
1356 i--;
1357 buf[i+1] = ' ';
1358 buf[i+2] = '\0';
1359 return i + 2;
1360}
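/*
 * Example (illustrative only): with buf = "IBM     " and i = 7, the
 * loop backs up over the trailing blanks to the 'M', then a single
 * space and a terminator are written, leaving "IBM " and returning 4,
 * the index at which the caller appends the next field.
 */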
1361
1362/**
1363 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1364 * @prefix: string to print at start of printk
1365 * @hostrcb: hostrcb pointer
1366 * @vpd: vendor/product id/sn struct
1367 *
1368 * Return value:
1369 * none
1370 **/
1371static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1372 struct ipr_vpd *vpd)
1373{
1374 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1375 int i = 0;
1376
1377 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1378 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1379
1380 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1381 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1382
1383 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1384 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1385
1386 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1387}
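/*
 * The resulting log line looks like (values illustrative):
 *   "Remote IOA VPID/SN: IBM 571A001 03018005A2F1"
 * i.e. vendor, product id and serial number separated by single spaces
 * with trailing blanks stripped.
 */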
1388
1389/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001391 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 *
1393 * Return value:
1394 * none
1395 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001396static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397{
1398 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1399 + IPR_SERIAL_NUM_LEN];
1400
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001401 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1402 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 IPR_PROD_ID_LEN);
1404 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1405 ipr_err("Vendor/Product ID: %s\n", buffer);
1406
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001407 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1409 ipr_err(" Serial Number: %s\n", buffer);
1410}
1411
1412/**
Brian King8cf093e2007-04-26 16:00:14 -05001413 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1414 * @prefix: string to print at start of printk
1415 * @hostrcb: hostrcb pointer
1416 * @vpd: vendor/product id/sn/wwn struct
1417 *
1418 * Return value:
1419 * none
1420 **/
1421static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1422 struct ipr_ext_vpd *vpd)
1423{
1424 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1425 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1426 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1427}
1428
1429/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001430 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1431 * @vpd: vendor/product id/sn/wwn struct
1432 *
1433 * Return value:
1434 * none
1435 **/
1436static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1437{
1438 ipr_log_vpd(&vpd->vpd);
1439 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1440 be32_to_cpu(vpd->wwid[1]));
1441}
1442
1443/**
1444 * ipr_log_enhanced_cache_error - Log a cache error.
1445 * @ioa_cfg: ioa config struct
1446 * @hostrcb: hostrcb struct
1447 *
1448 * Return value:
1449 * none
1450 **/
1451static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1452 struct ipr_hostrcb *hostrcb)
1453{
Wayne Boyer4565e372010-02-19 13:24:07 -08001454 struct ipr_hostrcb_type_12_error *error;
1455
1456 if (ioa_cfg->sis64)
1457 error = &hostrcb->hcam.u.error64.u.type_12_error;
1458 else
1459 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001460
1461 ipr_err("-----Current Configuration-----\n");
1462 ipr_err("Cache Directory Card Information:\n");
1463 ipr_log_ext_vpd(&error->ioa_vpd);
1464 ipr_err("Adapter Card Information:\n");
1465 ipr_log_ext_vpd(&error->cfc_vpd);
1466
1467 ipr_err("-----Expected Configuration-----\n");
1468 ipr_err("Cache Directory Card Information:\n");
1469 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1470 ipr_err("Adapter Card Information:\n");
1471 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1472
1473 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1474 be32_to_cpu(error->ioa_data[0]),
1475 be32_to_cpu(error->ioa_data[1]),
1476 be32_to_cpu(error->ioa_data[2]));
1477}
1478
1479/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 * ipr_log_cache_error - Log a cache error.
1481 * @ioa_cfg: ioa config struct
1482 * @hostrcb: hostrcb struct
1483 *
1484 * Return value:
1485 * none
1486 **/
1487static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1488 struct ipr_hostrcb *hostrcb)
1489{
1490 struct ipr_hostrcb_type_02_error *error =
1491 &hostrcb->hcam.u.error.u.type_02_error;
1492
1493 ipr_err("-----Current Configuration-----\n");
1494 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001495 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001497 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498
1499 ipr_err("-----Expected Configuration-----\n");
1500 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001501 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001503 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
1505 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1506 be32_to_cpu(error->ioa_data[0]),
1507 be32_to_cpu(error->ioa_data[1]),
1508 be32_to_cpu(error->ioa_data[2]));
1509}
1510
1511/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001512 * ipr_log_enhanced_config_error - Log a configuration error.
1513 * @ioa_cfg: ioa config struct
1514 * @hostrcb: hostrcb struct
1515 *
1516 * Return value:
1517 * none
1518 **/
1519static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1520 struct ipr_hostrcb *hostrcb)
1521{
1522 int errors_logged, i;
1523 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1524 struct ipr_hostrcb_type_13_error *error;
1525
1526 error = &hostrcb->hcam.u.error.u.type_13_error;
1527 errors_logged = be32_to_cpu(error->errors_logged);
1528
1529 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1530 be32_to_cpu(error->errors_detected), errors_logged);
1531
1532 dev_entry = error->dev;
1533
1534 for (i = 0; i < errors_logged; i++, dev_entry++) {
1535 ipr_err_separator;
1536
1537 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1538 ipr_log_ext_vpd(&dev_entry->vpd);
1539
1540 ipr_err("-----New Device Information-----\n");
1541 ipr_log_ext_vpd(&dev_entry->new_vpd);
1542
1543 ipr_err("Cache Directory Card Information:\n");
1544 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1545
1546 ipr_err("Adapter Card Information:\n");
1547 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1548 }
1549}
1550
1551/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001552 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1553 * @ioa_cfg: ioa config struct
1554 * @hostrcb: hostrcb struct
1555 *
1556 * Return value:
1557 * none
1558 **/
1559static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1560 struct ipr_hostrcb *hostrcb)
1561{
1562 int errors_logged, i;
1563 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1564 struct ipr_hostrcb_type_23_error *error;
1565 char buffer[IPR_MAX_RES_PATH_LENGTH];
1566
1567 error = &hostrcb->hcam.u.error64.u.type_23_error;
1568 errors_logged = be32_to_cpu(error->errors_logged);
1569
1570 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1571 be32_to_cpu(error->errors_detected), errors_logged);
1572
1573 dev_entry = error->dev;
1574
1575 for (i = 0; i < errors_logged; i++, dev_entry++) {
1576 ipr_err_separator;
1577
1578 ipr_err("Device %d : %s", i + 1,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001579 ipr_format_res_path(dev_entry->res_path, buffer,
1580 sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001581 ipr_log_ext_vpd(&dev_entry->vpd);
1582
1583 ipr_err("-----New Device Information-----\n");
1584 ipr_log_ext_vpd(&dev_entry->new_vpd);
1585
1586 ipr_err("Cache Directory Card Information:\n");
1587 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1588
1589 ipr_err("Adapter Card Information:\n");
1590 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1591 }
1592}
1593
1594/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 * ipr_log_config_error - Log a configuration error.
1596 * @ioa_cfg: ioa config struct
1597 * @hostrcb: hostrcb struct
1598 *
1599 * Return value:
1600 * none
1601 **/
1602static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1603 struct ipr_hostrcb *hostrcb)
1604{
1605 int errors_logged, i;
1606 struct ipr_hostrcb_device_data_entry *dev_entry;
1607 struct ipr_hostrcb_type_03_error *error;
1608
1609 error = &hostrcb->hcam.u.error.u.type_03_error;
1610 errors_logged = be32_to_cpu(error->errors_logged);
1611
1612 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1613 be32_to_cpu(error->errors_detected), errors_logged);
1614
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001615 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
1617 for (i = 0; i < errors_logged; i++, dev_entry++) {
1618 ipr_err_separator;
1619
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001620 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001621 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
1623 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001624 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001627 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001630 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
1632 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1633 be32_to_cpu(dev_entry->ioa_data[0]),
1634 be32_to_cpu(dev_entry->ioa_data[1]),
1635 be32_to_cpu(dev_entry->ioa_data[2]),
1636 be32_to_cpu(dev_entry->ioa_data[3]),
1637 be32_to_cpu(dev_entry->ioa_data[4]));
1638 }
1639}
1640
1641/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001642 * ipr_log_enhanced_array_error - Log an array configuration error.
1643 * @ioa_cfg: ioa config struct
1644 * @hostrcb: hostrcb struct
1645 *
1646 * Return value:
1647 * none
1648 **/
1649static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1650 struct ipr_hostrcb *hostrcb)
1651{
1652 int i, num_entries;
1653 struct ipr_hostrcb_type_14_error *error;
1654 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1655 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1656
1657 error = &hostrcb->hcam.u.error.u.type_14_error;
1658
1659 ipr_err_separator;
1660
1661 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1662 error->protection_level,
1663 ioa_cfg->host->host_no,
1664 error->last_func_vset_res_addr.bus,
1665 error->last_func_vset_res_addr.target,
1666 error->last_func_vset_res_addr.lun);
1667
1668 ipr_err_separator;
1669
1670 array_entry = error->array_member;
1671 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 1672			    ARRAY_SIZE(error->array_member));
1673
1674 for (i = 0; i < num_entries; i++, array_entry++) {
1675 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1676 continue;
1677
1678 if (be32_to_cpu(error->exposed_mode_adn) == i)
1679 ipr_err("Exposed Array Member %d:\n", i);
1680 else
1681 ipr_err("Array Member %d:\n", i);
1682
1683 ipr_log_ext_vpd(&array_entry->vpd);
1684 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1685 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1686 "Expected Location");
1687
1688 ipr_err_separator;
1689 }
1690}
1691
1692/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 * ipr_log_array_error - Log an array configuration error.
1694 * @ioa_cfg: ioa config struct
1695 * @hostrcb: hostrcb struct
1696 *
1697 * Return value:
1698 * none
1699 **/
1700static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1701 struct ipr_hostrcb *hostrcb)
1702{
1703 int i;
1704 struct ipr_hostrcb_type_04_error *error;
1705 struct ipr_hostrcb_array_data_entry *array_entry;
1706 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1707
1708 error = &hostrcb->hcam.u.error.u.type_04_error;
1709
1710 ipr_err_separator;
1711
1712 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1713 error->protection_level,
1714 ioa_cfg->host->host_no,
1715 error->last_func_vset_res_addr.bus,
1716 error->last_func_vset_res_addr.target,
1717 error->last_func_vset_res_addr.lun);
1718
1719 ipr_err_separator;
1720
1721 array_entry = error->array_member;
1722
1723 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001724 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 continue;
1726
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001727 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001729 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001732 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001734 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1735 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1736 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737
1738 ipr_err_separator;
1739
1740 if (i == 9)
1741 array_entry = error->array_member2;
1742 else
1743 array_entry++;
1744 }
1745}
1746
1747/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001748 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001749 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001750 * @data: IOA error data
1751 * @len: data length
1752 *
1753 * Return value:
1754 * none
1755 **/
Brian Kingac719ab2006-11-21 10:28:42 -06001756static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001757{
1758 int i;
1759
1760 if (len == 0)
1761 return;
1762
Brian Kingac719ab2006-11-21 10:28:42 -06001763 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1764 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1765
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001766 for (i = 0; i < len / 4; i += 4) {
1767 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1768 be32_to_cpu(data[i]),
1769 be32_to_cpu(data[i+1]),
1770 be32_to_cpu(data[i+2]),
1771 be32_to_cpu(data[i+3]));
1772 }
1773}
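/*
 * Example output (values illustrative):
 *   00000010: DEADBEEF 00000001 0000FFFF 12345678
 * The leading offset is in bytes (i * 4) because the loop index counts
 * 32-bit words; four words are dumped per line.
 */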
1774
1775/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001776 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1777 * @ioa_cfg: ioa config struct
1778 * @hostrcb: hostrcb struct
1779 *
1780 * Return value:
1781 * none
1782 **/
1783static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1784 struct ipr_hostrcb *hostrcb)
1785{
1786 struct ipr_hostrcb_type_17_error *error;
1787
Wayne Boyer4565e372010-02-19 13:24:07 -08001788 if (ioa_cfg->sis64)
1789 error = &hostrcb->hcam.u.error64.u.type_17_error;
1790 else
1791 error = &hostrcb->hcam.u.error.u.type_17_error;
1792
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001793 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001794 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001795
Brian King8cf093e2007-04-26 16:00:14 -05001796 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1797 be32_to_cpu(hostrcb->hcam.u.error.prc));
1798 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001799 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001800 be32_to_cpu(hostrcb->hcam.length) -
1801 (offsetof(struct ipr_hostrcb_error, u) +
1802 offsetof(struct ipr_hostrcb_type_17_error, data)));
1803}
1804
1805/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001806 * ipr_log_dual_ioa_error - Log a dual adapter error.
1807 * @ioa_cfg: ioa config struct
1808 * @hostrcb: hostrcb struct
1809 *
1810 * Return value:
1811 * none
1812 **/
1813static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1814 struct ipr_hostrcb *hostrcb)
1815{
1816 struct ipr_hostrcb_type_07_error *error;
1817
1818 error = &hostrcb->hcam.u.error.u.type_07_error;
1819 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001820 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001821
Brian King8cf093e2007-04-26 16:00:14 -05001822 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1823 be32_to_cpu(hostrcb->hcam.u.error.prc));
1824 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001825 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001826 be32_to_cpu(hostrcb->hcam.length) -
1827 (offsetof(struct ipr_hostrcb_error, u) +
1828 offsetof(struct ipr_hostrcb_type_07_error, data)));
1829}
1830
Brian King49dc6a12006-11-21 10:28:35 -06001831static const struct {
1832 u8 active;
1833 char *desc;
1834} path_active_desc[] = {
1835 { IPR_PATH_NO_INFO, "Path" },
1836 { IPR_PATH_ACTIVE, "Active path" },
1837 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1838};
1839
1840static const struct {
1841 u8 state;
1842 char *desc;
1843} path_state_desc[] = {
1844 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1845 { IPR_PATH_HEALTHY, "is healthy" },
1846 { IPR_PATH_DEGRADED, "is degraded" },
1847 { IPR_PATH_FAILED, "is failed" }
1848};
1849
1850/**
1851 * ipr_log_fabric_path - Log a fabric path error
1852 * @hostrcb: hostrcb struct
1853 * @fabric: fabric descriptor
1854 *
1855 * Return value:
1856 * none
1857 **/
1858static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1859 struct ipr_hostrcb_fabric_desc *fabric)
1860{
1861 int i, j;
1862 u8 path_state = fabric->path_state;
1863 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1864 u8 state = path_state & IPR_PATH_STATE_MASK;
1865
1866 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1867 if (path_active_desc[i].active != active)
1868 continue;
1869
1870 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1871 if (path_state_desc[j].state != state)
1872 continue;
1873
1874 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1875 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1876 path_active_desc[i].desc, path_state_desc[j].desc,
1877 fabric->ioa_port);
1878 } else if (fabric->cascaded_expander == 0xff) {
1879 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1880 path_active_desc[i].desc, path_state_desc[j].desc,
1881 fabric->ioa_port, fabric->phy);
1882 } else if (fabric->phy == 0xff) {
1883 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1884 path_active_desc[i].desc, path_state_desc[j].desc,
1885 fabric->ioa_port, fabric->cascaded_expander);
1886 } else {
1887 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1888 path_active_desc[i].desc, path_state_desc[j].desc,
1889 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1890 }
1891 return;
1892 }
1893 }
1894
1895 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1896 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1897}
1898
Wayne Boyer4565e372010-02-19 13:24:07 -08001899/**
1900 * ipr_log64_fabric_path - Log a fabric path error
1901 * @hostrcb: hostrcb struct
1902 * @fabric: fabric descriptor
1903 *
1904 * Return value:
1905 * none
1906 **/
1907static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1908 struct ipr_hostrcb64_fabric_desc *fabric)
1909{
1910 int i, j;
1911 u8 path_state = fabric->path_state;
1912 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1913 u8 state = path_state & IPR_PATH_STATE_MASK;
1914 char buffer[IPR_MAX_RES_PATH_LENGTH];
1915
1916 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1917 if (path_active_desc[i].active != active)
1918 continue;
1919
1920 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1921 if (path_state_desc[j].state != state)
1922 continue;
1923
1924 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1925 path_active_desc[i].desc, path_state_desc[j].desc,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001926 ipr_format_res_path(fabric->res_path, buffer,
1927 sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001928 return;
1929 }
1930 }
1931
1932 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001933 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001934}
1935
Brian King49dc6a12006-11-21 10:28:35 -06001936static const struct {
1937 u8 type;
1938 char *desc;
1939} path_type_desc[] = {
1940 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1941 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1942 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1943 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1944};
1945
1946static const struct {
1947 u8 status;
1948 char *desc;
1949} path_status_desc[] = {
1950 { IPR_PATH_CFG_NO_PROB, "Functional" },
1951 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1952 { IPR_PATH_CFG_FAILED, "Failed" },
1953 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1954 { IPR_PATH_NOT_DETECTED, "Missing" },
1955 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1956};
1957
1958static const char *link_rate[] = {
1959 "unknown",
1960 "disabled",
1961 "phy reset problem",
1962 "spinup hold",
1963 "port selector",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "1.5Gbps",
1968 "3.0Gbps",
1969 "unknown",
1970 "unknown",
1971 "unknown",
1972 "unknown",
1973 "unknown",
1974 "unknown"
1975};
1976
1977/**
1978 * ipr_log_path_elem - Log a fabric path element.
1979 * @hostrcb: hostrcb struct
1980 * @cfg: fabric path element struct
1981 *
1982 * Return value:
1983 * none
1984 **/
1985static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1986 struct ipr_hostrcb_config_element *cfg)
1987{
1988 int i, j;
1989 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1990 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1991
1992 if (type == IPR_PATH_CFG_NOT_EXIST)
1993 return;
1994
1995 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1996 if (path_type_desc[i].type != type)
1997 continue;
1998
1999 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2000 if (path_status_desc[j].status != status)
2001 continue;
2002
2003 if (type == IPR_PATH_CFG_IOA_PORT) {
2004 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2005 path_status_desc[j].desc, path_type_desc[i].desc,
2006 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2007 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2008 } else {
2009 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2010 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2011 path_status_desc[j].desc, path_type_desc[i].desc,
2012 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2013 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2014 } else if (cfg->cascaded_expander == 0xff) {
2015 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2016 "WWN=%08X%08X\n", path_status_desc[j].desc,
2017 path_type_desc[i].desc, cfg->phy,
2018 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2019 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2020 } else if (cfg->phy == 0xff) {
2021 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2022 "WWN=%08X%08X\n", path_status_desc[j].desc,
2023 path_type_desc[i].desc, cfg->cascaded_expander,
2024 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2025 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2026 } else {
2027 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2028 "WWN=%08X%08X\n", path_status_desc[j].desc,
2029 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2030 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2031 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2032 }
2033 }
2034 return;
2035 }
2036 }
2037
2038 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2039 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2040 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2041 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2042}
2043
2044/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002045 * ipr_log64_path_elem - Log a fabric path element.
2046 * @hostrcb: hostrcb struct
2047 * @cfg: fabric path element struct
2048 *
2049 * Return value:
2050 * none
2051 **/
2052static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2053 struct ipr_hostrcb64_config_element *cfg)
2054{
2055 int i, j;
2056 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2057 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2058 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2059 char buffer[IPR_MAX_RES_PATH_LENGTH];
2060
2061 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2062 return;
2063
2064 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2065 if (path_type_desc[i].type != type)
2066 continue;
2067
2068 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2069 if (path_status_desc[j].status != status)
2070 continue;
2071
2072 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2073 path_status_desc[j].desc, path_type_desc[i].desc,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07002074 ipr_format_res_path(cfg->res_path, buffer,
2075 sizeof(buffer)),
Wayne Boyer4565e372010-02-19 13:24:07 -08002076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2078 return;
2079 }
2080 }
2081 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2082 "WWN=%08X%08X\n", cfg->type_status,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07002083 ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
Wayne Boyer4565e372010-02-19 13:24:07 -08002084 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2085 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2086}
2087
2088/**
Brian King49dc6a12006-11-21 10:28:35 -06002089 * ipr_log_fabric_error - Log a fabric error.
2090 * @ioa_cfg: ioa config struct
2091 * @hostrcb: hostrcb struct
2092 *
2093 * Return value:
2094 * none
2095 **/
2096static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2097 struct ipr_hostrcb *hostrcb)
2098{
2099 struct ipr_hostrcb_type_20_error *error;
2100 struct ipr_hostrcb_fabric_desc *fabric;
2101 struct ipr_hostrcb_config_element *cfg;
2102 int i, add_len;
2103
2104 error = &hostrcb->hcam.u.error.u.type_20_error;
2105 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2106 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2107
2108 add_len = be32_to_cpu(hostrcb->hcam.length) -
2109 (offsetof(struct ipr_hostrcb_error, u) +
2110 offsetof(struct ipr_hostrcb_type_20_error, desc));
2111
2112 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2113 ipr_log_fabric_path(hostrcb, fabric);
2114 for_each_fabric_cfg(fabric, cfg)
2115 ipr_log_path_elem(hostrcb, cfg);
2116
2117 add_len -= be16_to_cpu(fabric->length);
2118 fabric = (struct ipr_hostrcb_fabric_desc *)
2119 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2120 }
2121
Brian Kingac719ab2006-11-21 10:28:42 -06002122 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002123}
2124
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002125/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002126 * ipr_log_sis64_array_error - Log a sis64 array error.
2127 * @ioa_cfg: ioa config struct
2128 * @hostrcb: hostrcb struct
2129 *
2130 * Return value:
2131 * none
2132 **/
2133static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2134 struct ipr_hostrcb *hostrcb)
2135{
2136 int i, num_entries;
2137 struct ipr_hostrcb_type_24_error *error;
2138 struct ipr_hostrcb64_array_data_entry *array_entry;
2139 char buffer[IPR_MAX_RES_PATH_LENGTH];
2140 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2141
2142 error = &hostrcb->hcam.u.error64.u.type_24_error;
2143
2144 ipr_err_separator;
2145
2146 ipr_err("RAID %s Array Configuration: %s\n",
2147 error->protection_level,
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07002148 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002149
2150 ipr_err_separator;
2151
2152 array_entry = error->array_member;
2153 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
 2154			    ARRAY_SIZE(error->array_member));
2155
2156 for (i = 0; i < num_entries; i++, array_entry++) {
2157
2158 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2159 continue;
2160
2161 if (error->exposed_mode_adn == i)
2162 ipr_err("Exposed Array Member %d:\n", i);
2163 else
2164 ipr_err("Array Member %d:\n", i);
2165
2166 ipr_err("Array Member %d:\n", i);
2167 ipr_log_ext_vpd(&array_entry->vpd);
2168 ipr_err("Current Location: %s",
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07002169 ipr_format_res_path(array_entry->res_path, buffer,
2170 sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002171		ipr_err("Expected Location: %s\n",
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07002172 ipr_format_res_path(array_entry->expected_res_path,
2173 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002174
2175 ipr_err_separator;
2176 }
2177}
2178
2179/**
2180 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2181 * @ioa_cfg: ioa config struct
2182 * @hostrcb: hostrcb struct
2183 *
2184 * Return value:
2185 * none
2186 **/
2187static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2188 struct ipr_hostrcb *hostrcb)
2189{
2190 struct ipr_hostrcb_type_30_error *error;
2191 struct ipr_hostrcb64_fabric_desc *fabric;
2192 struct ipr_hostrcb64_config_element *cfg;
2193 int i, add_len;
2194
2195 error = &hostrcb->hcam.u.error64.u.type_30_error;
2196
2197 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2198 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2199
2200 add_len = be32_to_cpu(hostrcb->hcam.length) -
2201 (offsetof(struct ipr_hostrcb64_error, u) +
2202 offsetof(struct ipr_hostrcb_type_30_error, desc));
2203
2204 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2205 ipr_log64_fabric_path(hostrcb, fabric);
2206 for_each_fabric_cfg(fabric, cfg)
2207 ipr_log64_path_elem(hostrcb, cfg);
2208
2209 add_len -= be16_to_cpu(fabric->length);
2210 fabric = (struct ipr_hostrcb64_fabric_desc *)
2211 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2212 }
2213
2214 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2215}
2216
2217/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 * ipr_log_generic_error - Log an adapter error.
2219 * @ioa_cfg: ioa config struct
2220 * @hostrcb: hostrcb struct
2221 *
2222 * Return value:
2223 * none
2224 **/
2225static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2226 struct ipr_hostrcb *hostrcb)
2227{
Brian Kingac719ab2006-11-21 10:28:42 -06002228 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002229 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
2231
2232/**
 2233 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2234 * @ioasc: IOASC
2235 *
 2236 * This function will return the index into the ipr_error_table
2237 * for the specified IOASC. If the IOASC is not in the table,
2238 * 0 will be returned, which points to the entry used for unknown errors.
2239 *
2240 * Return value:
2241 * index into the ipr_error_table
2242 **/
2243static u32 ipr_get_error(u32 ioasc)
2244{
2245 int i;
2246
2247 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002248 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 return i;
2250
2251 return 0;
2252}
2253
2254/**
2255 * ipr_handle_log_data - Log an adapter error.
2256 * @ioa_cfg: ioa config struct
2257 * @hostrcb: hostrcb struct
2258 *
2259 * This function logs an adapter error to the system.
2260 *
2261 * Return value:
2262 * none
2263 **/
2264static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2265 struct ipr_hostrcb *hostrcb)
2266{
2267 u32 ioasc;
2268 int error_index;
2269
2270 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2271 return;
2272
2273 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2274 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2275
Wayne Boyer4565e372010-02-19 13:24:07 -08002276 if (ioa_cfg->sis64)
2277 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2278 else
2279 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Wayne Boyer4565e372010-02-19 13:24:07 -08002281 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2282 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2284 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002285 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
2287
2288 error_index = ipr_get_error(ioasc);
2289
2290 if (!ipr_error_table[error_index].log_hcam)
2291 return;
2292
Brian King49dc6a12006-11-21 10:28:35 -06002293 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 /* Set indication we have logged an error */
2296 ioa_cfg->errors_logged++;
2297
Brian King933916f2007-03-29 12:43:30 -05002298 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002300 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2301 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
2303 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 case IPR_HOST_RCB_OVERLAY_ID_2:
2305 ipr_log_cache_error(ioa_cfg, hostrcb);
2306 break;
2307 case IPR_HOST_RCB_OVERLAY_ID_3:
2308 ipr_log_config_error(ioa_cfg, hostrcb);
2309 break;
2310 case IPR_HOST_RCB_OVERLAY_ID_4:
2311 case IPR_HOST_RCB_OVERLAY_ID_6:
2312 ipr_log_array_error(ioa_cfg, hostrcb);
2313 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002314 case IPR_HOST_RCB_OVERLAY_ID_7:
2315 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2316 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002317 case IPR_HOST_RCB_OVERLAY_ID_12:
2318 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2319 break;
2320 case IPR_HOST_RCB_OVERLAY_ID_13:
2321 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2322 break;
2323 case IPR_HOST_RCB_OVERLAY_ID_14:
2324 case IPR_HOST_RCB_OVERLAY_ID_16:
2325 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2326 break;
2327 case IPR_HOST_RCB_OVERLAY_ID_17:
2328 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2329 break;
Brian King49dc6a12006-11-21 10:28:35 -06002330 case IPR_HOST_RCB_OVERLAY_ID_20:
2331 ipr_log_fabric_error(ioa_cfg, hostrcb);
2332 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002333 case IPR_HOST_RCB_OVERLAY_ID_23:
2334 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2335 break;
2336 case IPR_HOST_RCB_OVERLAY_ID_24:
2337 case IPR_HOST_RCB_OVERLAY_ID_26:
2338 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2339 break;
2340 case IPR_HOST_RCB_OVERLAY_ID_30:
2341 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2342 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002343 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002346 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 break;
2348 }
2349}
2350
2351/**
2352 * ipr_process_error - Op done function for an adapter error log.
2353 * @ipr_cmd: ipr command struct
2354 *
2355 * This function is the op done function for an error log host
 2356 * controlled async command (HCAM) from the adapter. It will log the error and
2357 * send the HCAM back to the adapter.
2358 *
2359 * Return value:
2360 * none
2361 **/
2362static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2363{
2364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2365 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002366 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002367 u32 fd_ioasc;
2368
2369 if (ioa_cfg->sis64)
2370 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2371 else
2372 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
2374 list_del(&hostrcb->queue);
2375 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2376
2377 if (!ioasc) {
2378 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002379 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2380 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2382 dev_err(&ioa_cfg->pdev->dev,
2383 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2384 }
2385
2386 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2387}
2388
2389/**
2390 * ipr_timeout - An internally generated op has timed out.
2391 * @ipr_cmd: ipr command struct
2392 *
2393 * This function blocks host requests and initiates an
2394 * adapter reset.
2395 *
2396 * Return value:
2397 * none
2398 **/
2399static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2400{
2401 unsigned long lock_flags = 0;
2402 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2403
2404 ENTER;
2405 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2406
2407 ioa_cfg->errors_logged++;
2408 dev_err(&ioa_cfg->pdev->dev,
2409 "Adapter being reset due to command timeout.\n");
2410
2411 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2412 ioa_cfg->sdt_state = GET_DUMP;
2413
2414 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2416
2417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2418 LEAVE;
2419}
2420
2421/**
2422 * ipr_oper_timeout - Adapter timed out transitioning to operational
2423 * @ipr_cmd: ipr command struct
2424 *
2425 * This function blocks host requests and initiates an
2426 * adapter reset.
2427 *
2428 * Return value:
2429 * none
2430 **/
2431static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2432{
2433 unsigned long lock_flags = 0;
2434 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2435
2436 ENTER;
2437 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2438
2439 ioa_cfg->errors_logged++;
2440 dev_err(&ioa_cfg->pdev->dev,
2441 "Adapter timed out transitioning to operational.\n");
2442
2443 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2444 ioa_cfg->sdt_state = GET_DUMP;
2445
2446 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2447 if (ipr_fastfail)
2448 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2449 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2450 }
2451
2452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2453 LEAVE;
2454}
2455
2456/**
2457 * ipr_reset_reload - Reset/Reload the IOA
2458 * @ioa_cfg: ioa config struct
2459 * @shutdown_type: shutdown type
2460 *
2461 * This function resets the adapter and re-initializes it.
2462 * This function assumes that all new host commands have been stopped.
2463 * Return value:
2464 * SUCCESS / FAILED
2465 **/
2466static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2467 enum ipr_shutdown_type shutdown_type)
2468{
2469 if (!ioa_cfg->in_reset_reload)
2470 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2471
2472 spin_unlock_irq(ioa_cfg->host->host_lock);
2473 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2474 spin_lock_irq(ioa_cfg->host->host_lock);
2475
 2476	/* If we got hit with a host reset while we were already resetting
 2477	 the adapter for some reason and that reset failed, report FAILED. */
2478 if (ioa_cfg->ioa_is_dead) {
2479 ipr_trace;
2480 return FAILED;
2481 }
2482
2483 return SUCCESS;
2484}
2485
2486/**
2487 * ipr_find_ses_entry - Find matching SES in SES table
2488 * @res: resource entry struct of SES
2489 *
2490 * Return value:
2491 * pointer to SES table entry / NULL on failure
2492 **/
2493static const struct ipr_ses_table_entry *
2494ipr_find_ses_entry(struct ipr_resource_entry *res)
2495{
2496 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002497 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2499
2500 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2501 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2502 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002503 vpids = &res->std_inq_data.vpids;
2504 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 matches++;
2506 else
2507 break;
2508 } else
2509 matches++;
2510 }
2511
2512 if (matches == IPR_PROD_ID_LEN)
2513 return ste;
2514 }
2515
2516 return NULL;
2517}
2518
2519/**
2520 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2521 * @ioa_cfg: ioa config struct
2522 * @bus: SCSI bus
2523 * @bus_width: bus width
2524 *
2525 * Return value:
2526 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2527 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2528 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2529 * max 160MHz = max 320MB/sec).
2530 **/
2531static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2532{
2533 struct ipr_resource_entry *res;
2534 const struct ipr_ses_table_entry *ste;
2535 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2536
2537 /* Loop through each config table entry in the config table buffer */
2538 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002539 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 continue;
2541
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002542 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 continue;
2544
2545 if (!(ste = ipr_find_ses_entry(res)))
2546 continue;
2547
2548 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2549 }
2550
2551 return max_xfer_rate;
2552}
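/*
 * Example (hypothetical numbers): a 16-bit wide bus whose matching SES
 * table entry limits it to 320 MB/s yields (320 * 10) / (16 / 8) = 1600,
 * i.e. 160 MHz in the 100 kHz units described above.
 */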
2553
2554/**
2555 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2556 * @ioa_cfg: ioa config struct
2557 * @max_delay: max delay in micro-seconds to wait
2558 *
2559 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2560 *
2561 * Return value:
2562 * 0 on success / other on failure
2563 **/
2564static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2565{
2566 volatile u32 pcii_reg;
2567 int delay = 1;
2568
2569 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2570 while (delay < max_delay) {
2571 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2572
2573 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2574 return 0;
2575
2576 /* udelay cannot be used if delay is more than a few milliseconds */
2577 if ((delay / 1000) > MAX_UDELAY_MS)
2578 mdelay(delay / 1000);
2579 else
2580 udelay(delay);
2581
2582 delay += delay;
2583 }
2584 return -EIO;
2585}
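/*
 * Note: the poll interval doubles on every pass (1, 2, 4, ... usec), so
 * the total busy-wait is bounded by roughly twice max_delay before -EIO
 * is returned.
 */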
2586
2587/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002588 * ipr_get_sis64_dump_data_section - Dump IOA memory
2589 * @ioa_cfg: ioa config struct
2590 * @start_addr: adapter address to dump
2591 * @dest: destination kernel buffer
2592 * @length_in_words: length to dump in 4 byte words
2593 *
2594 * Return value:
2595 * 0 on success
2596 **/
2597static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2598 u32 start_addr,
2599 __be32 *dest, u32 length_in_words)
2600{
2601 int i;
2602
2603 for (i = 0; i < length_in_words; i++) {
2604 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2605 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2606 dest++;
2607 }
2608
2609 return 0;
2610}
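/*
 * SIS64 adapters expose IOA memory through an address/data register
 * pair: each 32-bit word is fetched by writing its address to
 * dump_addr_reg and then reading dump_data_reg, so none of the mailbox
 * handshaking used by the legacy path below is required.
 */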
2611
2612/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 * ipr_get_ldump_data_section - Dump IOA memory
2614 * @ioa_cfg: ioa config struct
2615 * @start_addr: adapter address to dump
2616 * @dest: destination kernel buffer
2617 * @length_in_words: length to dump in 4 byte words
2618 *
2619 * Return value:
2620 * 0 on success / -EIO on failure
2621 **/
2622static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2623 u32 start_addr,
2624 __be32 *dest, u32 length_in_words)
2625{
2626 volatile u32 temp_pcii_reg;
2627 int i, delay = 0;
2628
Wayne Boyerdcbad002010-02-19 13:24:14 -08002629 if (ioa_cfg->sis64)
2630 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2631 dest, length_in_words);
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 /* Write IOA interrupt reg starting LDUMP state */
2634 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002635 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636
2637 /* Wait for IO debug acknowledge */
2638 if (ipr_wait_iodbg_ack(ioa_cfg,
2639 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2640 dev_err(&ioa_cfg->pdev->dev,
2641 "IOA dump long data transfer timeout\n");
2642 return -EIO;
2643 }
2644
2645 /* Signal LDUMP interlocked - clear IO debug ack */
2646 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2647 ioa_cfg->regs.clr_interrupt_reg);
2648
2649 /* Write Mailbox with starting address */
2650 writel(start_addr, ioa_cfg->ioa_mailbox);
2651
2652 /* Signal address valid - clear IOA Reset alert */
2653 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002654 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655
2656 for (i = 0; i < length_in_words; i++) {
2657 /* Wait for IO debug acknowledge */
2658 if (ipr_wait_iodbg_ack(ioa_cfg,
2659 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2660 dev_err(&ioa_cfg->pdev->dev,
2661 "IOA dump short data transfer timeout\n");
2662 return -EIO;
2663 }
2664
2665 /* Read data from mailbox and increment destination pointer */
2666 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2667 dest++;
2668
2669 /* For all but the last word of data, signal data received */
2670 if (i < (length_in_words - 1)) {
2671 /* Signal dump data received - Clear IO debug Ack */
2672 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2673 ioa_cfg->regs.clr_interrupt_reg);
2674 }
2675 }
2676
2677 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2678 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002679 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680
2681 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002682 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
2684 /* Signal dump data received - Clear IO debug Ack */
2685 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2686 ioa_cfg->regs.clr_interrupt_reg);
2687
2688 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2689 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2690 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002691 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692
2693 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2694 return 0;
2695
2696 udelay(10);
2697 delay += 10;
2698 }
2699
2700 return 0;
2701}
2702
2703#ifdef CONFIG_SCSI_IPR_DUMP
2704/**
2705 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2706 * @ioa_cfg: ioa config struct
2707 * @pci_address: adapter address
2708 * @length: length of data to copy
2709 *
2710 * Copy data from PCI adapter to kernel buffer.
2711 * Note: length MUST be a 4 byte multiple
2712 * Return value:
2713 * 0 on success / other on failure
2714 **/
2715static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2716 unsigned long pci_address, u32 length)
2717{
2718 int bytes_copied = 0;
2719 int cur_len, rc, rem_len, rem_page_len;
2720 __be32 *page;
2721 unsigned long lock_flags = 0;
2722 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2723
2724 while (bytes_copied < length &&
2725 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2726 if (ioa_dump->page_offset >= PAGE_SIZE ||
2727 ioa_dump->page_offset == 0) {
2728 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2729
2730 if (!page) {
2731 ipr_trace;
2732 return bytes_copied;
2733 }
2734
2735 ioa_dump->page_offset = 0;
2736 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2737 ioa_dump->next_page_index++;
2738 } else
2739 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2740
2741 rem_len = length - bytes_copied;
2742 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2743 cur_len = min(rem_len, rem_page_len);
2744
2745 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2746 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2747 rc = -EIO;
2748 } else {
2749 rc = ipr_get_ldump_data_section(ioa_cfg,
2750 pci_address + bytes_copied,
2751 &page[ioa_dump->page_offset / 4],
2752 (cur_len / sizeof(u32)));
2753 }
2754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2755
2756 if (!rc) {
2757 ioa_dump->page_offset += cur_len;
2758 bytes_copied += cur_len;
2759 } else {
2760 ipr_trace;
2761 break;
2762 }
2763 schedule();
2764 }
2765
2766 return bytes_copied;
2767}
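/*
 * Worked example (illustrative only, assuming 4 KB pages and a page_offset
 * that starts at zero): copying a 10,000 byte dump section with
 * ipr_sdt_copy() takes three iterations.  The first allocates a page and
 * copies 4096 bytes, the second sees page_offset == PAGE_SIZE, allocates a
 * second page and copies another 4096 bytes, and the third copies the
 * remaining 1808 bytes into a third page, leaving page_offset at 1808.
 */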
2768
2769/**
2770 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2771 * @hdr: dump entry header struct
2772 *
2773 * Return value:
2774 * nothing
2775 **/
2776static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2777{
2778 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2779 hdr->num_elems = 1;
2780 hdr->offset = sizeof(*hdr);
2781 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2782}
2783
2784/**
2785 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2786 * @ioa_cfg: ioa config struct
2787 * @driver_dump: driver dump struct
2788 *
2789 * Return value:
2790 * nothing
2791 **/
2792static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2793 struct ipr_driver_dump *driver_dump)
2794{
2795 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2796
2797 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2798 driver_dump->ioa_type_entry.hdr.len =
2799 sizeof(struct ipr_dump_ioa_type_entry) -
2800 sizeof(struct ipr_dump_entry_header);
2801 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2802 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2803 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2804 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2805 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2806 ucode_vpd->minor_release[1];
2807 driver_dump->hdr.num_entries++;
2808}
2809
2810/**
2811 * ipr_dump_version_data - Fill in the driver version in the dump.
2812 * @ioa_cfg: ioa config struct
2813 * @driver_dump: driver dump struct
2814 *
2815 * Return value:
2816 * nothing
2817 **/
2818static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2819 struct ipr_driver_dump *driver_dump)
2820{
2821 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2822 driver_dump->version_entry.hdr.len =
2823 sizeof(struct ipr_dump_version_entry) -
2824 sizeof(struct ipr_dump_entry_header);
2825 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2826 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2827 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2828 driver_dump->hdr.num_entries++;
2829}
2830
2831/**
2832 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2833 * @ioa_cfg: ioa config struct
2834 * @driver_dump: driver dump struct
2835 *
2836 * Return value:
2837 * nothing
2838 **/
2839static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2840 struct ipr_driver_dump *driver_dump)
2841{
2842 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2843 driver_dump->trace_entry.hdr.len =
2844 sizeof(struct ipr_dump_trace_entry) -
2845 sizeof(struct ipr_dump_entry_header);
2846 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2847 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2848 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2849 driver_dump->hdr.num_entries++;
2850}
2851
2852/**
2853 * ipr_dump_location_data - Fill in the IOA location in the dump.
2854 * @ioa_cfg: ioa config struct
2855 * @driver_dump: driver dump struct
2856 *
2857 * Return value:
2858 * nothing
2859 **/
2860static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2861 struct ipr_driver_dump *driver_dump)
2862{
2863 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2864 driver_dump->location_entry.hdr.len =
2865 sizeof(struct ipr_dump_location_entry) -
2866 sizeof(struct ipr_dump_entry_header);
2867 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2868 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01002869 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 driver_dump->hdr.num_entries++;
2871}
2872
2873/**
2874 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2875 * @ioa_cfg: ioa config struct
2876 * @dump: dump struct
2877 *
2878 * Return value:
2879 * nothing
2880 **/
2881static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2882{
2883 unsigned long start_addr, sdt_word;
2884 unsigned long lock_flags = 0;
2885 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2886 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2887 u32 num_entries, start_off, end_off;
2888 u32 bytes_to_copy, bytes_copied, rc;
2889 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08002890 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 int i;
2892
2893 ENTER;
2894
2895 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2896
2897 if (ioa_cfg->sdt_state != GET_DUMP) {
2898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2899 return;
2900 }
2901
2902 start_addr = readl(ioa_cfg->ioa_mailbox);
2903
Wayne Boyerdcbad002010-02-19 13:24:14 -08002904 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 dev_err(&ioa_cfg->pdev->dev,
2906 "Invalid dump table format: %lx\n", start_addr);
2907 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2908 return;
2909 }
2910
2911 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2912
2913 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2914
2915 /* Initialize the overall dump header */
2916 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2917 driver_dump->hdr.num_entries = 1;
2918 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2919 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2920 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2921 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2922
2923 ipr_dump_version_data(ioa_cfg, driver_dump);
2924 ipr_dump_location_data(ioa_cfg, driver_dump);
2925 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2926 ipr_dump_trace_data(ioa_cfg, driver_dump);
2927
2928 /* Update dump_header */
2929 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2930
2931 /* IOA Dump entry */
2932 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 ioa_dump->hdr.len = 0;
2934 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2935 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2936
2937 /* First entries in sdt are actually a list of dump addresses and
2938 lengths to gather the real dump data. sdt represents the pointer
2939 to the ioa generated dump table. Dump data will be extracted based
2940 on entries in this table */
2941 sdt = &ioa_dump->sdt;
2942
2943 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2944 sizeof(struct ipr_sdt) / sizeof(__be32));
2945
2946 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08002947 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2948 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 dev_err(&ioa_cfg->pdev->dev,
2950 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2951 rc, be32_to_cpu(sdt->hdr.state));
2952 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2953 ioa_cfg->sdt_state = DUMP_OBTAINED;
2954 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2955 return;
2956 }
2957
2958 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2959
2960 if (num_entries > IPR_NUM_SDT_ENTRIES)
2961 num_entries = IPR_NUM_SDT_ENTRIES;
2962
2963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964
2965 for (i = 0; i < num_entries; i++) {
2966 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2967 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2968 break;
2969 }
2970
2971 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08002972 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2973 if (ioa_cfg->sis64)
2974 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2975 else {
2976 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2977 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Wayne Boyerdcbad002010-02-19 13:24:14 -08002979 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2980 bytes_to_copy = end_off - start_off;
2981 else
2982 valid = 0;
2983 }
2984 if (valid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2986 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2987 continue;
2988 }
2989
2990 /* Copy data from adapter to driver buffers */
2991 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2992 bytes_to_copy);
2993
2994 ioa_dump->hdr.len += bytes_copied;
2995
2996 if (bytes_copied != bytes_to_copy) {
2997 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2998 break;
2999 }
3000 }
3001 }
3002 }
3003
3004 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3005
3006 /* Update dump_header */
3007 driver_dump->hdr.len += ioa_dump->hdr.len;
3008 wmb();
3009 ioa_cfg->sdt_state = DUMP_OBTAINED;
3010 LEAVE;
3011}
3012
3013#else
3014#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3015#endif
3016
3017/**
3018 * ipr_release_dump - Free adapter dump memory
3019 * @kref: kref struct
3020 *
3021 * Return value:
3022 * nothing
3023 **/
3024static void ipr_release_dump(struct kref *kref)
3025{
3026 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
3027 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3028 unsigned long lock_flags = 0;
3029 int i;
3030
3031 ENTER;
3032 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3033 ioa_cfg->dump = NULL;
3034 ioa_cfg->sdt_state = INACTIVE;
3035 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3036
3037 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3038 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3039
3040 kfree(dump);
3041 LEAVE;
3042}
3043
3044/**
3045 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003046 * @work:		work struct (embedded in the ioa config struct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 *
3048 * Called at task level from a work thread. This function takes care
 3049 * of adding and removing devices from the mid-layer as configuration
3050 * changes are detected by the adapter.
3051 *
3052 * Return value:
3053 * nothing
3054 **/
David Howellsc4028952006-11-22 14:57:56 +00003055static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056{
3057 unsigned long lock_flags;
3058 struct ipr_resource_entry *res;
3059 struct scsi_device *sdev;
3060 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003061 struct ipr_ioa_cfg *ioa_cfg =
3062 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 u8 bus, target, lun;
3064 int did_work;
3065
3066 ENTER;
3067 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068
3069 if (ioa_cfg->sdt_state == GET_DUMP) {
3070 dump = ioa_cfg->dump;
3071 if (!dump) {
3072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3073 return;
3074 }
3075 kref_get(&dump->kref);
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 ipr_get_ioa_dump(ioa_cfg, dump);
3078 kref_put(&dump->kref, ipr_release_dump);
3079
3080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3081 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3082 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084 return;
3085 }
3086
3087restart:
3088 do {
3089 did_work = 0;
3090 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 return;
3093 }
3094
3095 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3096 if (res->del_from_ml && res->sdev) {
3097 did_work = 1;
3098 sdev = res->sdev;
3099 if (!scsi_device_get(sdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3102 scsi_remove_device(sdev);
3103 scsi_device_put(sdev);
3104 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3105 }
3106 break;
3107 }
3108 }
3109 } while(did_work);
3110
3111 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3112 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003113 bus = res->bus;
3114 target = res->target;
3115 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003116 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3118 scsi_add_device(ioa_cfg->host, bus, target, lun);
3119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3120 goto restart;
3121 }
3122 }
3123
3124 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003125 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 LEAVE;
3127}
3128
3129#ifdef CONFIG_SCSI_IPR_TRACE
3130/**
3131 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003132 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003134 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 * @buf: buffer
3136 * @off: offset
3137 * @count: buffer size
3138 *
3139 * Return value:
 3140 *	number of bytes read from the trace buffer
3141 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003142static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003143 struct bin_attribute *bin_attr,
3144 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145{
Tony Jonesee959b02008-02-22 00:13:36 +01003146 struct device *dev = container_of(kobj, struct device, kobj);
3147 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3149 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003150 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
3152 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003153 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3154 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003156
3157 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158}
3159
3160static struct bin_attribute ipr_trace_attr = {
3161 .attr = {
3162 .name = "trace",
3163 .mode = S_IRUGO,
3164 },
3165 .size = 0,
3166 .read = ipr_read_trace,
3167};
3168#endif
3169
3170/**
3171 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003172 * @dev: class device struct
3173 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 *
3175 * Return value:
3176 * number of bytes printed to buffer
3177 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003178static ssize_t ipr_show_fw_version(struct device *dev,
3179 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180{
Tony Jonesee959b02008-02-22 00:13:36 +01003181 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3183 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3184 unsigned long lock_flags = 0;
3185 int len;
3186
3187 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3188 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3189 ucode_vpd->major_release, ucode_vpd->card_type,
3190 ucode_vpd->minor_release[0],
3191 ucode_vpd->minor_release[1]);
3192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3193 return len;
3194}
3195
Tony Jonesee959b02008-02-22 00:13:36 +01003196static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 .attr = {
3198 .name = "fw_version",
3199 .mode = S_IRUGO,
3200 },
3201 .show = ipr_show_fw_version,
3202};
3203
3204/**
3205 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003206 * @dev: class device struct
3207 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 *
3209 * Return value:
3210 * number of bytes printed to buffer
3211 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003212static ssize_t ipr_show_log_level(struct device *dev,
3213 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214{
Tony Jonesee959b02008-02-22 00:13:36 +01003215 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3217 unsigned long lock_flags = 0;
3218 int len;
3219
3220 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3221 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3222 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3223 return len;
3224}
3225
3226/**
3227 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003228 * @dev: class device struct
3229 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 *
3231 * Return value:
3232 * number of bytes printed to buffer
3233 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003234static ssize_t ipr_store_log_level(struct device *dev,
3235 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 const char *buf, size_t count)
3237{
Tony Jonesee959b02008-02-22 00:13:36 +01003238 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3240 unsigned long lock_flags = 0;
3241
3242 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3243 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3244 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3245 return strlen(buf);
3246}
3247
Tony Jonesee959b02008-02-22 00:13:36 +01003248static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 .attr = {
3250 .name = "log_level",
3251 .mode = S_IRUGO | S_IWUSR,
3252 },
3253 .show = ipr_show_log_level,
3254 .store = ipr_store_log_level
3255};
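/*
 * Illustrative user-space sketch (not part of the driver): log_level is a
 * sysfs attribute, so it can be changed at run time without an ioctl.  The
 * host number and path below are assumptions for the example only.
 *
 *	int fd = open("/sys/class/scsi_host/host0/log_level", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4\n", 2);
 *		close(fd);
 *	}
 */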
3256
3257/**
3258 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003259 * @dev: device struct
3260 * @buf: buffer
3261 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 *
3263 * This function will reset the adapter and wait a reasonable
3264 * amount of time for any errors that the adapter might log.
3265 *
3266 * Return value:
3267 * count on success / other on failure
3268 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003269static ssize_t ipr_store_diagnostics(struct device *dev,
3270 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 const char *buf, size_t count)
3272{
Tony Jonesee959b02008-02-22 00:13:36 +01003273 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3275 unsigned long lock_flags = 0;
3276 int rc = count;
3277
3278 if (!capable(CAP_SYS_ADMIN))
3279 return -EACCES;
3280
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003282 while(ioa_cfg->in_reset_reload) {
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3285 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3286 }
3287
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 ioa_cfg->errors_logged = 0;
3289 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3290
3291 if (ioa_cfg->in_reset_reload) {
3292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3293 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3294
3295 /* Wait for a second for any errors to be logged */
3296 msleep(1000);
3297 } else {
3298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3299 return -EIO;
3300 }
3301
3302 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3303 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3304 rc = -EIO;
3305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3306
3307 return rc;
3308}
3309
Tony Jonesee959b02008-02-22 00:13:36 +01003310static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311 .attr = {
3312 .name = "run_diagnostics",
3313 .mode = S_IWUSR,
3314 },
3315 .store = ipr_store_diagnostics
3316};
3317
3318/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003319 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003320 * @dev: device struct
3321 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003322 *
3323 * Return value:
3324 * number of bytes printed to buffer
3325 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003326static ssize_t ipr_show_adapter_state(struct device *dev,
3327 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003328{
Tony Jonesee959b02008-02-22 00:13:36 +01003329 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003330 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3331 unsigned long lock_flags = 0;
3332 int len;
3333
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335 if (ioa_cfg->ioa_is_dead)
3336 len = snprintf(buf, PAGE_SIZE, "offline\n");
3337 else
3338 len = snprintf(buf, PAGE_SIZE, "online\n");
3339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340 return len;
3341}
3342
3343/**
3344 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003345 * @dev: device struct
3346 * @buf: buffer
3347 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003348 *
3349 * This function will change the adapter's state.
3350 *
3351 * Return value:
3352 * count on success / other on failure
3353 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003354static ssize_t ipr_store_adapter_state(struct device *dev,
3355 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003356 const char *buf, size_t count)
3357{
Tony Jonesee959b02008-02-22 00:13:36 +01003358 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003359 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3360 unsigned long lock_flags;
3361 int result = count;
3362
3363 if (!capable(CAP_SYS_ADMIN))
3364 return -EACCES;
3365
3366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3367 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3368 ioa_cfg->ioa_is_dead = 0;
3369 ioa_cfg->reset_retries = 0;
3370 ioa_cfg->in_ioa_bringdown = 0;
3371 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3372 }
3373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3375
3376 return result;
3377}
3378
Tony Jonesee959b02008-02-22 00:13:36 +01003379static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003380 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003381 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003382 .mode = S_IRUGO | S_IWUSR,
3383 },
3384 .show = ipr_show_adapter_state,
3385 .store = ipr_store_adapter_state
3386};
3387
3388/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003390 * @dev: device struct
3391 * @buf: buffer
3392 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 *
3394 * This function will reset the adapter.
3395 *
3396 * Return value:
3397 * count on success / other on failure
3398 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003399static ssize_t ipr_store_reset_adapter(struct device *dev,
3400 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401 const char *buf, size_t count)
3402{
Tony Jonesee959b02008-02-22 00:13:36 +01003403 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3405 unsigned long lock_flags;
3406 int result = count;
3407
3408 if (!capable(CAP_SYS_ADMIN))
3409 return -EACCES;
3410
3411 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3412 if (!ioa_cfg->in_reset_reload)
3413 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3415 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3416
3417 return result;
3418}
3419
Tony Jonesee959b02008-02-22 00:13:36 +01003420static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 .attr = {
3422 .name = "reset_host",
3423 .mode = S_IWUSR,
3424 },
3425 .store = ipr_store_reset_adapter
3426};
3427
3428/**
3429 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3430 * @buf_len: buffer length
3431 *
3432 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3433 * list to use for microcode download
3434 *
3435 * Return value:
3436 * pointer to sglist / NULL on failure
3437 **/
3438static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3439{
3440 int sg_size, order, bsize_elem, num_elem, i, j;
3441 struct ipr_sglist *sglist;
3442 struct scatterlist *scatterlist;
3443 struct page *page;
3444
3445 /* Get the minimum size per scatter/gather element */
3446 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3447
3448 /* Get the actual size per element */
3449 order = get_order(sg_size);
3450
3451 /* Determine the actual number of bytes per element */
3452 bsize_elem = PAGE_SIZE * (1 << order);
3453
3454 /* Determine the actual number of sg entries needed */
3455 if (buf_len % bsize_elem)
3456 num_elem = (buf_len / bsize_elem) + 1;
3457 else
3458 num_elem = buf_len / bsize_elem;
3459
3460 /* Allocate a scatter/gather list for the DMA */
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003461 sglist = kzalloc(sizeof(struct ipr_sglist) +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 (sizeof(struct scatterlist) * (num_elem - 1)),
3463 GFP_KERNEL);
3464
3465 if (sglist == NULL) {
3466 ipr_trace;
3467 return NULL;
3468 }
3469
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 scatterlist = sglist->scatterlist;
Jens Axboe45711f12007-10-22 21:19:53 +02003471 sg_init_table(scatterlist, num_elem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472
3473 sglist->order = order;
3474 sglist->num_sg = num_elem;
3475
3476 /* Allocate a bunch of sg elements */
3477 for (i = 0; i < num_elem; i++) {
3478 page = alloc_pages(GFP_KERNEL, order);
3479 if (!page) {
3480 ipr_trace;
3481
3482 /* Free up what we already allocated */
3483 for (j = i - 1; j >= 0; j--)
Jens Axboe45711f12007-10-22 21:19:53 +02003484 __free_pages(sg_page(&scatterlist[j]), order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 kfree(sglist);
3486 return NULL;
3487 }
3488
Jens Axboe642f1492007-10-24 11:20:47 +02003489 sg_set_page(&scatterlist[i], page, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 }
3491
3492 return sglist;
3493}
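/*
 * Sizing example (illustrative only, assuming 4 KB pages and an
 * IPR_MAX_SGLIST of 64): for a 1 MB microcode image, sg_size works out to
 * 1048576 / 63 = 16644 bytes, get_order() rounds that up to order 3
 * (32 KB per element), so bsize_elem is 32768 and num_elem is
 * 32 scatter/gather entries.
 */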
3494
3495/**
3496 * ipr_free_ucode_buffer - Frees a microcode download buffer
 3497 * @sglist:		scatter/gather list pointer
3498 *
3499 * Free a DMA'able ucode download buffer previously allocated with
3500 * ipr_alloc_ucode_buffer
3501 *
3502 * Return value:
3503 * nothing
3504 **/
3505static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3506{
3507 int i;
3508
3509 for (i = 0; i < sglist->num_sg; i++)
Jens Axboe45711f12007-10-22 21:19:53 +02003510 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511
3512 kfree(sglist);
3513}
3514
3515/**
3516 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3517 * @sglist: scatter/gather list pointer
3518 * @buffer: buffer pointer
3519 * @len: buffer length
3520 *
3521 * Copy a microcode image from a user buffer into a buffer allocated by
3522 * ipr_alloc_ucode_buffer
3523 *
3524 * Return value:
3525 * 0 on success / other on failure
3526 **/
3527static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3528 u8 *buffer, u32 len)
3529{
3530 int bsize_elem, i, result = 0;
3531 struct scatterlist *scatterlist;
3532 void *kaddr;
3533
3534 /* Determine the actual number of bytes per element */
3535 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3536
3537 scatterlist = sglist->scatterlist;
3538
3539 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003540 struct page *page = sg_page(&scatterlist[i]);
3541
3542 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003544 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545
3546 scatterlist[i].length = bsize_elem;
3547
3548 if (result != 0) {
3549 ipr_trace;
3550 return result;
3551 }
3552 }
3553
3554 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003555 struct page *page = sg_page(&scatterlist[i]);
3556
3557 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003559 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560
3561 scatterlist[i].length = len % bsize_elem;
3562 }
3563
3564 sglist->buffer_len = len;
3565 return result;
3566}
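/*
 * Copy example (illustrative only): with the 32 KB elements from the
 * sizing example above, a 1,000,000 byte image is copied as 30 full
 * elements (983,040 bytes) followed by one partial element of 16,960
 * bytes, whose scatterlist length is trimmed to that remainder.
 */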
3567
3568/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003569 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3570 * @ipr_cmd: ipr command struct
3571 * @sglist: scatter/gather list
3572 *
3573 * Builds a microcode download IOA data list (IOADL).
3574 *
3575 **/
3576static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3577 struct ipr_sglist *sglist)
3578{
3579 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3580 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3581 struct scatterlist *scatterlist = sglist->scatterlist;
3582 int i;
3583
3584 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3585 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3586 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3587
3588 ioarcb->ioadl_len =
3589 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3590 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3591 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3592 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3593 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3594 }
3595
3596 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3597}
3598
3599/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003600 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 * @ipr_cmd: ipr command struct
3602 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003604 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003607static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3608 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003611 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 struct scatterlist *scatterlist = sglist->scatterlist;
3613 int i;
3614
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003615 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003617 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3618
3619 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3621
3622 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3623 ioadl[i].flags_and_data_len =
3624 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3625 ioadl[i].address =
3626 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3627 }
3628
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003629 ioadl[i-1].flags_and_data_len |=
3630 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3631}
3632
3633/**
3634 * ipr_update_ioa_ucode - Update IOA's microcode
3635 * @ioa_cfg: ioa config struct
3636 * @sglist: scatter/gather list
3637 *
3638 * Initiate an adapter reset to update the IOA's microcode
3639 *
3640 * Return value:
3641 * 0 on success / -EIO on failure
3642 **/
3643static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3644 struct ipr_sglist *sglist)
3645{
3646 unsigned long lock_flags;
3647
3648 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King970ea292007-04-26 16:00:06 -05003649 while(ioa_cfg->in_reset_reload) {
3650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3651 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3652 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3653 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003654
3655 if (ioa_cfg->ucode_sglist) {
3656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3657 dev_err(&ioa_cfg->pdev->dev,
3658 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659 return -EIO;
3660 }
3661
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003662 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3663 sglist->num_sg, DMA_TO_DEVICE);
3664
3665 if (!sglist->num_dma_sg) {
3666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667 dev_err(&ioa_cfg->pdev->dev,
3668 "Failed to map microcode download buffer!\n");
3669 return -EIO;
3670 }
3671
3672 ioa_cfg->ucode_sglist = sglist;
3673 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3675 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3676
3677 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3678 ioa_cfg->ucode_sglist = NULL;
3679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 return 0;
3681}
3682
3683/**
3684 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003685 * @dev: device struct
3686 * @buf: buffer
3687 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 *
3689 * This function will update the firmware on the adapter.
3690 *
3691 * Return value:
3692 * count on success / other on failure
3693 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003694static ssize_t ipr_store_update_fw(struct device *dev,
3695 struct device_attribute *attr,
3696 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697{
Tony Jonesee959b02008-02-22 00:13:36 +01003698 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700 struct ipr_ucode_image_header *image_hdr;
3701 const struct firmware *fw_entry;
3702 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 char fname[100];
3704 char *src;
3705 int len, result, dnld_size;
3706
3707 if (!capable(CAP_SYS_ADMIN))
3708 return -EACCES;
3709
3710 len = snprintf(fname, 99, "%s", buf);
3711 fname[len-1] = '\0';
3712
3713 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3714 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3715 return -EIO;
3716 }
3717
3718 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3719
3720 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3721 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3722 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3723 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3724 release_firmware(fw_entry);
3725 return -EINVAL;
3726 }
3727
3728 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3729 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3730 sglist = ipr_alloc_ucode_buffer(dnld_size);
3731
3732 if (!sglist) {
3733 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3734 release_firmware(fw_entry);
3735 return -ENOMEM;
3736 }
3737
3738 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3739
3740 if (result) {
3741 dev_err(&ioa_cfg->pdev->dev,
3742 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003743 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 }
3745
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003746 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003748 if (!result)
3749 result = count;
3750out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 ipr_free_ucode_buffer(sglist);
3752 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003753 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754}
3755
Tony Jonesee959b02008-02-22 00:13:36 +01003756static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 .attr = {
3758 .name = "update_fw",
3759 .mode = S_IWUSR,
3760 },
3761 .store = ipr_store_update_fw
3762};
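/*
 * Illustrative user-space sketch (not part of the driver): a microcode
 * update is requested by writing a firmware file name to the update_fw
 * attribute; the image itself is loaded through request_firmware().  The
 * host number and file name below are assumptions for the example only.
 * The trailing newline matters because the store routine truncates the
 * last character of the name it receives.
 *
 *	int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "pci.XXXXXXXX.img\n", 17);
 *		close(fd);
 *	}
 */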
3763
Tony Jonesee959b02008-02-22 00:13:36 +01003764static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 &ipr_fw_version_attr,
3766 &ipr_log_level_attr,
3767 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003768 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 &ipr_ioa_reset_attr,
3770 &ipr_update_fw_attr,
3771 NULL,
3772};
3773
3774#ifdef CONFIG_SCSI_IPR_DUMP
3775/**
3776 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07003777 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003779 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 * @buf: buffer
3781 * @off: offset
3782 * @count: buffer size
3783 *
3784 * Return value:
3785 * number of bytes printed to buffer
 3786 *	number of bytes read on success / other on failure
Chris Wright2c3c8be2010-05-12 18:28:57 -07003787static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003788 struct bin_attribute *bin_attr,
3789 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790{
Tony Jonesee959b02008-02-22 00:13:36 +01003791 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 struct Scsi_Host *shost = class_to_shost(cdev);
3793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3794 struct ipr_dump *dump;
3795 unsigned long lock_flags = 0;
3796 char *src;
3797 int len;
3798 size_t rc = count;
3799
3800 if (!capable(CAP_SYS_ADMIN))
3801 return -EACCES;
3802
3803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3804 dump = ioa_cfg->dump;
3805
3806 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3808 return 0;
3809 }
3810 kref_get(&dump->kref);
3811 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3812
3813 if (off > dump->driver_dump.hdr.len) {
3814 kref_put(&dump->kref, ipr_release_dump);
3815 return 0;
3816 }
3817
3818 if (off + count > dump->driver_dump.hdr.len) {
3819 count = dump->driver_dump.hdr.len - off;
3820 rc = count;
3821 }
3822
3823 if (count && off < sizeof(dump->driver_dump)) {
3824 if (off + count > sizeof(dump->driver_dump))
3825 len = sizeof(dump->driver_dump) - off;
3826 else
3827 len = count;
3828 src = (u8 *)&dump->driver_dump + off;
3829 memcpy(buf, src, len);
3830 buf += len;
3831 off += len;
3832 count -= len;
3833 }
3834
3835 off -= sizeof(dump->driver_dump);
3836
3837 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3838 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3839 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3840 else
3841 len = count;
3842 src = (u8 *)&dump->ioa_dump + off;
3843 memcpy(buf, src, len);
3844 buf += len;
3845 off += len;
3846 count -= len;
3847 }
3848
3849 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3850
3851 while (count) {
3852 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3853 len = PAGE_ALIGN(off) - off;
3854 else
3855 len = count;
3856 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3857 src += off & ~PAGE_MASK;
3858 memcpy(buf, src, len);
3859 buf += len;
3860 off += len;
3861 count -= len;
3862 }
3863
3864 kref_put(&dump->kref, ipr_release_dump);
3865 return rc;
3866}
3867
3868/**
3869 * ipr_alloc_dump - Prepare for adapter dump
3870 * @ioa_cfg: ioa config struct
3871 *
3872 * Return value:
3873 * 0 on success / other on failure
3874 **/
3875static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3876{
3877 struct ipr_dump *dump;
3878 unsigned long lock_flags = 0;
3879
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06003880 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881
3882 if (!dump) {
3883 ipr_err("Dump memory allocation failed\n");
3884 return -ENOMEM;
3885 }
3886
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 kref_init(&dump->kref);
3888 dump->ioa_cfg = ioa_cfg;
3889
3890 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3891
3892 if (INACTIVE != ioa_cfg->sdt_state) {
3893 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894 kfree(dump);
3895 return 0;
3896 }
3897
3898 ioa_cfg->dump = dump;
3899 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3900 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3901 ioa_cfg->dump_taken = 1;
3902 schedule_work(&ioa_cfg->work_q);
3903 }
3904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3905
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 return 0;
3907}
3908
3909/**
3910 * ipr_free_dump - Free adapter dump memory
3911 * @ioa_cfg: ioa config struct
3912 *
3913 * Return value:
3914 * 0 on success / other on failure
3915 **/
3916static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3917{
3918 struct ipr_dump *dump;
3919 unsigned long lock_flags = 0;
3920
3921 ENTER;
3922
3923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3924 dump = ioa_cfg->dump;
3925 if (!dump) {
3926 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3927 return 0;
3928 }
3929
3930 ioa_cfg->dump = NULL;
3931 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3932
3933 kref_put(&dump->kref, ipr_release_dump);
3934
3935 LEAVE;
3936 return 0;
3937}
3938
3939/**
3940 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07003941 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003943 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 * @buf: buffer
3945 * @off: offset
3946 * @count: buffer size
3947 *
3948 * Return value:
 3949 *	number of bytes written on success / other on failure
3950 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003951static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003952 struct bin_attribute *bin_attr,
3953 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954{
Tony Jonesee959b02008-02-22 00:13:36 +01003955 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 struct Scsi_Host *shost = class_to_shost(cdev);
3957 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3958 int rc;
3959
3960 if (!capable(CAP_SYS_ADMIN))
3961 return -EACCES;
3962
3963 if (buf[0] == '1')
3964 rc = ipr_alloc_dump(ioa_cfg);
3965 else if (buf[0] == '0')
3966 rc = ipr_free_dump(ioa_cfg);
3967 else
3968 return -EINVAL;
3969
3970 if (rc)
3971 return rc;
3972 else
3973 return count;
3974}
3975
3976static struct bin_attribute ipr_dump_attr = {
3977 .attr = {
3978 .name = "dump",
3979 .mode = S_IRUSR | S_IWUSR,
3980 },
3981 .size = 0,
3982 .read = ipr_read_dump,
3983 .write = ipr_write_dump
3984};
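/*
 * Illustrative user-space sketch (not part of the driver): the dump
 * attribute drives the whole dump life cycle.  Writing '1' allocates the
 * dump buffers, a later read returns the collected dump, and writing '0'
 * releases the memory.  The path is an assumption for the example only.
 *
 *	int fd = open("/sys/class/scsi_host/host0/dump", O_RDWR);
 *	write(fd, "1", 1);			(arm the dump)
 *	... wait for the adapter dump to be obtained ...
 *	read(fd, buf, sizeof(buf));		(retrieve the dump data)
 *	write(fd, "0", 1);			(free the dump buffers)
 *	close(fd);
 */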
3985#else
3986static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3987#endif
3988
3989/**
3990 * ipr_change_queue_depth - Change the device's queue depth
3991 * @sdev: scsi device struct
3992 * @qdepth: depth to set
Mike Christiee881a172009-10-15 17:46:39 -07003993 * @reason: calling context
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 *
3995 * Return value:
3996 * actual depth set
3997 **/
Mike Christiee881a172009-10-15 17:46:39 -07003998static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3999 int reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000{
Brian King35a39692006-09-25 12:39:20 -05004001 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4002 struct ipr_resource_entry *res;
4003 unsigned long lock_flags = 0;
4004
Mike Christiee881a172009-10-15 17:46:39 -07004005 if (reason != SCSI_QDEPTH_DEFAULT)
4006 return -EOPNOTSUPP;
4007
Brian King35a39692006-09-25 12:39:20 -05004008 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4009 res = (struct ipr_resource_entry *)sdev->hostdata;
4010
4011 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4012 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4016 return sdev->queue_depth;
4017}
4018
4019/**
4020 * ipr_change_queue_type - Change the device's queue type
 4021 * @sdev:	scsi device struct
4022 * @tag_type: type of tags to use
4023 *
4024 * Return value:
4025 * actual queue type set
4026 **/
4027static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4028{
4029 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4030 struct ipr_resource_entry *res;
4031 unsigned long lock_flags = 0;
4032
4033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4034 res = (struct ipr_resource_entry *)sdev->hostdata;
4035
4036 if (res) {
4037 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4038 /*
4039 * We don't bother quiescing the device here since the
4040 * adapter firmware does it for us.
4041 */
4042 scsi_set_tag_type(sdev, tag_type);
4043
4044 if (tag_type)
4045 scsi_activate_tcq(sdev, sdev->queue_depth);
4046 else
4047 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4048 } else
4049 tag_type = 0;
4050 } else
4051 tag_type = 0;
4052
4053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054 return tag_type;
4055}
4056
4057/**
4058 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4059 * @dev: device struct
4060 * @buf: buffer
4061 *
4062 * Return value:
4063 * number of bytes printed to buffer
4064 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004065static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066{
4067 struct scsi_device *sdev = to_scsi_device(dev);
4068 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4069 struct ipr_resource_entry *res;
4070 unsigned long lock_flags = 0;
4071 ssize_t len = -ENXIO;
4072
4073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4074 res = (struct ipr_resource_entry *)sdev->hostdata;
4075 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004076 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4078 return len;
4079}
4080
4081static struct device_attribute ipr_adapter_handle_attr = {
4082 .attr = {
4083 .name = "adapter_handle",
4084 .mode = S_IRUSR,
4085 },
4086 .show = ipr_show_adapter_handle
4087};
4088
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004089/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004090 * ipr_show_resource_path - Show the resource path or the resource address for
4091 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004092 * @dev: device struct
4093 * @buf: buffer
4094 *
4095 * Return value:
4096 * number of bytes printed to buffer
4097 **/
4098static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4099{
4100 struct scsi_device *sdev = to_scsi_device(dev);
4101 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4102 struct ipr_resource_entry *res;
4103 unsigned long lock_flags = 0;
4104 ssize_t len = -ENXIO;
4105 char buffer[IPR_MAX_RES_PATH_LENGTH];
4106
4107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4108 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004109 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004110 len = snprintf(buf, PAGE_SIZE, "%s\n",
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004111 ipr_format_res_path(res->res_path, buffer,
4112 sizeof(buffer)));
4113 else if (res)
4114 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4115 res->bus, res->target, res->lun);
4116
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118 return len;
4119}
4120
4121static struct device_attribute ipr_resource_path_attr = {
4122 .attr = {
4123 .name = "resource_path",
4124 .mode = S_IRUSR,
4125 },
4126 .show = ipr_show_resource_path
4127};
4128
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129static struct device_attribute *ipr_dev_attrs[] = {
4130 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004131 &ipr_resource_path_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 NULL,
4133};
4134
4135/**
4136 * ipr_biosparam - Return the HSC mapping
4137 * @sdev: scsi device struct
4138 * @block_device: block device pointer
4139 * @capacity: capacity of the device
4140 * @parm: Array containing returned HSC values.
4141 *
4142 * This function generates the HSC parms that fdisk uses.
4143 * We want to make sure we return something that places partitions
4144 * on 4k boundaries for best performance with the IOA.
4145 *
4146 * Return value:
4147 * 0 on success
4148 **/
4149static int ipr_biosparam(struct scsi_device *sdev,
4150 struct block_device *block_device,
4151 sector_t capacity, int *parm)
4152{
4153 int heads, sectors;
4154 sector_t cylinders;
4155
4156 heads = 128;
4157 sectors = 32;
4158
4159 cylinders = capacity;
4160 sector_div(cylinders, (128 * 32));
4161
4162 /* return result */
4163 parm[0] = heads;
4164 parm[1] = sectors;
4165 parm[2] = cylinders;
4166
4167 return 0;
4168}
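/*
 * Geometry example (illustrative only): for a 70 GB volume of
 * 136,718,750 sectors, ipr_biosparam() reports 128 heads, 32 sectors per
 * track and 136718750 / 4096 = 33378 cylinders.  A cylinder is therefore
 * 4096 sectors (2 MB), so partitioning tools that honour the reported
 * geometry keep partitions aligned to the 4 KB boundaries the IOA
 * prefers.
 */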
4169
4170/**
Brian King35a39692006-09-25 12:39:20 -05004171 * ipr_find_starget - Find target based on bus/target.
4172 * @starget: scsi target struct
4173 *
4174 * Return value:
4175 * resource entry pointer if found / NULL if not found
4176 **/
4177static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4178{
4179 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4180 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4181 struct ipr_resource_entry *res;
4182
4183 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004184 if ((res->bus == starget->channel) &&
4185 (res->target == starget->id) &&
4186 (res->lun == 0)) {
Brian King35a39692006-09-25 12:39:20 -05004187 return res;
4188 }
4189 }
4190
4191 return NULL;
4192}
4193
4194static struct ata_port_info sata_port_info;
4195
4196/**
4197 * ipr_target_alloc - Prepare for commands to a SCSI target
4198 * @starget: scsi target struct
4199 *
4200 * If the device is a SATA device, this function allocates an
4201 * ATA port with libata, else it does nothing.
4202 *
4203 * Return value:
4204 * 0 on success / non-0 on failure
4205 **/
4206static int ipr_target_alloc(struct scsi_target *starget)
4207{
4208 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4209 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4210 struct ipr_sata_port *sata_port;
4211 struct ata_port *ap;
4212 struct ipr_resource_entry *res;
4213 unsigned long lock_flags;
4214
4215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4216 res = ipr_find_starget(starget);
4217 starget->hostdata = NULL;
4218
4219 if (res && ipr_is_gata(res)) {
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4221 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4222 if (!sata_port)
4223 return -ENOMEM;
4224
4225 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4226 if (ap) {
4227 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4228 sata_port->ioa_cfg = ioa_cfg;
4229 sata_port->ap = ap;
4230 sata_port->res = res;
4231
4232 res->sata_port = sata_port;
4233 ap->private_data = sata_port;
4234 starget->hostdata = sata_port;
4235 } else {
4236 kfree(sata_port);
4237 return -ENOMEM;
4238 }
4239 }
4240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4241
4242 return 0;
4243}
4244
4245/**
4246 * ipr_target_destroy - Destroy a SCSI target
4247 * @starget: scsi target struct
4248 *
4249 * If the device was a SATA device, this function frees the libata
4250 * ATA port, else it does nothing.
4251 *
4252 **/
4253static void ipr_target_destroy(struct scsi_target *starget)
4254{
4255 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004256 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4257 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4258
4259 if (ioa_cfg->sis64) {
4260 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4261 clear_bit(starget->id, ioa_cfg->array_ids);
4262 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4263 clear_bit(starget->id, ioa_cfg->vset_ids);
4264 else if (starget->channel == 0)
4265 clear_bit(starget->id, ioa_cfg->target_ids);
4266 }
Brian King35a39692006-09-25 12:39:20 -05004267
4268 if (sata_port) {
4269 starget->hostdata = NULL;
4270 ata_sas_port_destroy(sata_port->ap);
4271 kfree(sata_port);
4272 }
4273}
4274
4275/**
4276 * ipr_find_sdev - Find device based on bus/target/lun.
4277 * @sdev: scsi device struct
4278 *
4279 * Return value:
4280 * resource entry pointer if found / NULL if not found
4281 **/
4282static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4283{
4284 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4285 struct ipr_resource_entry *res;
4286
4287 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004288 if ((res->bus == sdev->channel) &&
4289 (res->target == sdev->id) &&
4290 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004291 return res;
4292 }
4293
4294 return NULL;
4295}
4296
4297/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 * ipr_slave_destroy - Unconfigure a SCSI device
4299 * @sdev: scsi device struct
4300 *
4301 * Return value:
4302 * nothing
4303 **/
4304static void ipr_slave_destroy(struct scsi_device *sdev)
4305{
4306 struct ipr_resource_entry *res;
4307 struct ipr_ioa_cfg *ioa_cfg;
4308 unsigned long lock_flags = 0;
4309
4310 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4311
4312 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4313 res = (struct ipr_resource_entry *) sdev->hostdata;
4314 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004315 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004316 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 sdev->hostdata = NULL;
4318 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004319 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320 }
4321 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4322}
4323
4324/**
4325 * ipr_slave_configure - Configure a SCSI device
4326 * @sdev: scsi device struct
4327 *
4328 * This function configures the specified scsi device.
4329 *
4330 * Return value:
4331 * 0 on success
4332 **/
4333static int ipr_slave_configure(struct scsi_device *sdev)
4334{
4335 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4336 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004337 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004339 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340
4341 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4342 res = sdev->hostdata;
4343 if (res) {
4344 if (ipr_is_af_dasd_device(res))
4345 sdev->type = TYPE_RAID;
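 /* AF DASD devices and the IOA resource are handled through the adapter
  * rather than as ordinary SCSI disks; setting no_uld_attach below keeps
  * upper layer drivers such as sd from binding to them.
  */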
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004346 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004348 sdev->no_uld_attach = 1;
4349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 if (ipr_is_vset_device(res)) {
Jens Axboe242f9dc2008-09-14 05:55:09 -07004351 blk_queue_rq_timeout(sdev->request_queue,
4352 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004353 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354 }
Brian Kinge4fbf442006-03-29 09:37:22 -06004355 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 sdev->allow_restart = 1;
Brian Kingdd406ef2009-04-22 08:58:02 -05004357 if (ipr_is_gata(res) && res->sata_port)
4358 ap = res->sata_port->ap;
4359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4360
4361 if (ap) {
Brian King35a39692006-09-25 12:39:20 -05004362 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004363 ata_sas_slave_configure(sdev, ap);
4364 } else
Brian King35a39692006-09-25 12:39:20 -05004365 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004366 if (ioa_cfg->sis64)
4367 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004368 ipr_format_res_path(res->res_path, buffer,
4369 sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004370 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 }
4372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4373 return 0;
4374}
4375
4376/**
Brian King35a39692006-09-25 12:39:20 -05004377 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4378 * @sdev: scsi device struct
4379 *
4380 * This function initializes an ATA port so that future commands
4381 * sent through queuecommand will work.
4382 *
4383 * Return value:
4384 * 0 on success
4385 **/
4386static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4387{
4388 struct ipr_sata_port *sata_port = NULL;
4389 int rc = -ENXIO;
4390
4391 ENTER;
4392 if (sdev->sdev_target)
4393 sata_port = sdev->sdev_target->hostdata;
4394 if (sata_port)
4395 rc = ata_sas_port_init(sata_port->ap);
4396 if (rc)
4397 ipr_slave_destroy(sdev);
4398
4399 LEAVE;
4400 return rc;
4401}
4402
4403/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 * ipr_slave_alloc - Prepare for commands to a device.
4405 * @sdev: scsi device struct
4406 *
4407 * This function saves a pointer to the resource entry
4408 * in the scsi device struct if the device exists. We
4409 * can then use this pointer in ipr_queuecommand when
4410 * handling new commands.
4411 *
4412 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004413 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 **/
4415static int ipr_slave_alloc(struct scsi_device *sdev)
4416{
4417 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4418 struct ipr_resource_entry *res;
4419 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004420 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
4422 sdev->hostdata = NULL;
4423
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425
Brian King35a39692006-09-25 12:39:20 -05004426 res = ipr_find_sdev(sdev);
4427 if (res) {
4428 res->sdev = sdev;
4429 res->add_to_ml = 0;
4430 res->in_erp = 0;
4431 sdev->hostdata = res;
4432 if (!ipr_is_naca_model(res))
4433 res->needs_sync_complete = 1;
4434 rc = 0;
4435 if (ipr_is_gata(res)) {
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 }
4439 }
4440
4441 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4442
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004443 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444}
4445
4446/**
4447 * ipr_eh_host_reset - Reset the host adapter
4448 * @scsi_cmd: scsi command struct
4449 *
4450 * Return value:
4451 * SUCCESS / FAILED
4452 **/
Jeff Garzik df0ae242005-05-28 07:57:14 -04004453static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454{
4455 struct ipr_ioa_cfg *ioa_cfg;
4456 int rc;
4457
4458 ENTER;
4459 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4460
4461 dev_err(&ioa_cfg->pdev->dev,
4462 "Adapter being reset as a result of error recovery.\n");
4463
4464 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4465 ioa_cfg->sdt_state = GET_DUMP;
4466
4467 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4468
4469 LEAVE;
4470 return rc;
4471}
4472
Jeff Garzik df0ae242005-05-28 07:57:14 -04004473static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4474{
4475 int rc;
4476
4477 spin_lock_irq(cmd->device->host->host_lock);
4478 rc = __ipr_eh_host_reset(cmd);
4479 spin_unlock_irq(cmd->device->host->host_lock);
4480
4481 return rc;
4482}
4483
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484/**
Brian Kingc6513092006-03-29 09:37:43 -06004485 * ipr_device_reset - Reset the device
4486 * @ioa_cfg: ioa config struct
4487 * @res: resource entry struct
4488 *
4489 * This function issues a device reset to the affected device.
4490 * If the device is a SCSI device, a LUN reset will be sent
4491 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05004492 * will be sent. If the device is a SATA device, a PHY reset will
4493 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06004494 *
4495 * Return value:
4496 * 0 on success / non-zero on failure
4497 **/
4498static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4499 struct ipr_resource_entry *res)
4500{
4501 struct ipr_cmnd *ipr_cmd;
4502 struct ipr_ioarcb *ioarcb;
4503 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05004504 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06004505 u32 ioasc;
4506
4507 ENTER;
4508 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4509 ioarcb = &ipr_cmd->ioarcb;
4510 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08004511
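 /* On SIS-64 adapters the ATA register block lives in the command's
  * ata_ioadl area and is located via add_cmd_parms_offset; older
  * adapters carry it inline in the IOARCB add_data area.
  */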
4512 if (ipr_cmd->ioa_cfg->sis64) {
4513 regs = &ipr_cmd->i.ata_ioadl.regs;
4514 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4515 } else
4516 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06004517
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004518 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06004519 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4520 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05004521 if (ipr_is_gata(res)) {
4522 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08004523 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05004524 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4525 }
Brian Kingc6513092006-03-29 09:37:43 -06004526
4527 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004528 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingc6513092006-03-29 09:37:43 -06004529 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004530 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4531 if (ipr_cmd->ioa_cfg->sis64)
4532 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4533 sizeof(struct ipr_ioasa_gata));
4534 else
4535 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4536 sizeof(struct ipr_ioasa_gata));
4537 }
Brian Kingc6513092006-03-29 09:37:43 -06004538
4539 LEAVE;
4540 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4541}
4542
4543/**
Brian King35a39692006-09-25 12:39:20 -05004544 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09004545 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05004546 * @classes: class of the attached device
 * @deadline: reset deadline (not used by this handler)
4547 *
Tejun Heocc0680a2007-08-06 18:36:23 +09004548 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05004549 *
4550 * Return value:
4551 * 0 on success / non-zero on failure
4552 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09004553static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07004554 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05004555{
Tejun Heocc0680a2007-08-06 18:36:23 +09004556 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05004557 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4558 struct ipr_resource_entry *res;
4559 unsigned long lock_flags = 0;
4560 int rc = -ENXIO;
4561
4562 ENTER;
4563 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
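 /* If an adapter reset/reload is in progress, drop the host lock and
  * wait for it to finish before issuing the device reset.
  */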
Brian King73d98ff2006-11-21 10:27:58 -06004564 while(ioa_cfg->in_reset_reload) {
4565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4566 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4567 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4568 }
4569
Brian King35a39692006-09-25 12:39:20 -05004570 res = sata_port->res;
4571 if (res) {
4572 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004573 *classes = res->ata_class;
Brian King35a39692006-09-25 12:39:20 -05004574 }
4575
4576 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4577 LEAVE;
4578 return rc;
4579}
4580
4581/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582 * ipr_eh_dev_reset - Reset the device
4583 * @scsi_cmd: scsi command struct
4584 *
4585 * This function issues a device reset to the affected device.
4586 * A LUN reset will be sent to the device first. If that does
4587 * not work, a target reset will be sent.
4588 *
4589 * Return value:
4590 * SUCCESS / FAILED
4591 **/
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004592static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593{
4594 struct ipr_cmnd *ipr_cmd;
4595 struct ipr_ioa_cfg *ioa_cfg;
4596 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05004597 struct ata_port *ap;
4598 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599
4600 ENTER;
4601 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4602 res = scsi_cmd->device->hostdata;
4603
brking@us.ibm.comeeb883072005-11-01 17:02:29 -06004604 if (!res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 return FAILED;
4606
4607 /*
4608 * If we are currently going through reset/reload, return failed. This will force the
4609 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4610 * reset to complete
4611 */
4612 if (ioa_cfg->in_reset_reload)
4613 return FAILED;
4614 if (ioa_cfg->ioa_is_dead)
4615 return FAILED;
4616
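 /* Redirect any command still outstanding to this device to the error
  * handler done routines; pending ATA commands are also marked timed
  * out/failed so libata EH will reap them.
  */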
4617 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004618 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619 if (ipr_cmd->scsi_cmd)
4620 ipr_cmd->done = ipr_scsi_eh_done;
Brian King24d6f2b2007-03-29 12:43:17 -05004621 if (ipr_cmd->qc)
4622 ipr_cmd->done = ipr_sata_eh_done;
Brian King7402ece2006-11-21 10:28:23 -06004623 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4624 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4625 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 }
4628 }
4629
4630 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004631 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05004632
4633 if (ipr_is_gata(res) && res->sata_port) {
4634 ap = res->sata_port->ap;
4635 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09004636 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05004637 spin_lock_irq(scsi_cmd->device->host->host_lock);
Brian King5af23d22007-05-09 15:36:35 -05004638
4639 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004640 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King5af23d22007-05-09 15:36:35 -05004641 rc = -EIO;
4642 break;
4643 }
4644 }
Brian King35a39692006-09-25 12:39:20 -05004645 } else
4646 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004647 res->resetting_device = 0;
4648
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649 LEAVE;
Brian Kingc6513092006-03-29 09:37:43 -06004650 return (rc ? FAILED : SUCCESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651}
4652
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04004653static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4654{
4655 int rc;
4656
4657 spin_lock_irq(cmd->device->host->host_lock);
4658 rc = __ipr_eh_dev_reset(cmd);
4659 spin_unlock_irq(cmd->device->host->host_lock);
4660
4661 return rc;
4662}
4663
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664/**
4665 * ipr_bus_reset_done - Op done function for bus reset.
4666 * @ipr_cmd: ipr command struct
4667 *
4668 * This function is the op done function for a bus reset
4669 *
4670 * Return value:
4671 * none
4672 **/
4673static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4674{
4675 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4676 struct ipr_resource_entry *res;
4677
4678 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004679 if (!ioa_cfg->sis64)
4680 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4681 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4682 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4683 break;
4684 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686
4687 /*
4688 * If abort has not completed, indicate the reset has, else call the
4689 * abort's done function to wake the sleeping eh thread
4690 */
4691 if (ipr_cmd->sibling->sibling)
4692 ipr_cmd->sibling->sibling = NULL;
4693 else
4694 ipr_cmd->sibling->done(ipr_cmd->sibling);
4695
4696 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4697 LEAVE;
4698}
4699
4700/**
4701 * ipr_abort_timeout - An abort task has timed out
4702 * @ipr_cmd: ipr command struct
4703 *
4704 * This function handles when an abort task times out. If this
4705 * happens we issue a bus reset since we have resources tied
4706 * up that must be freed before returning to the midlayer.
4707 *
4708 * Return value:
4709 * none
4710 **/
4711static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4712{
4713 struct ipr_cmnd *reset_cmd;
4714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4715 struct ipr_cmd_pkt *cmd_pkt;
4716 unsigned long lock_flags = 0;
4717
4718 ENTER;
4719 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4720 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722 return;
4723 }
4724
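 /* Issue a bus reset using the same res_handle and cross-link the two
  * commands through their sibling pointers so ipr_bus_reset_done can
  * tell whether the original abort already completed.
  */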
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004725 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4727 ipr_cmd->sibling = reset_cmd;
4728 reset_cmd->sibling = ipr_cmd;
4729 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4730 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4731 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4732 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4733 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4734
4735 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4737 LEAVE;
4738}
4739
4740/**
4741 * ipr_cancel_op - Cancel specified op
4742 * @scsi_cmd: scsi command struct
4743 *
4744 * This function cancels the specified op.
4745 *
4746 * Return value:
4747 * SUCCESS / FAILED
4748 **/
4749static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4750{
4751 struct ipr_cmnd *ipr_cmd;
4752 struct ipr_ioa_cfg *ioa_cfg;
4753 struct ipr_resource_entry *res;
4754 struct ipr_cmd_pkt *cmd_pkt;
4755 u32 ioasc;
4756 int op_found = 0;
4757
4758 ENTER;
4759 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4760 res = scsi_cmd->device->hostdata;
4761
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004762 /* If we are currently going through reset/reload, return failed.
4763 * This will force the mid-layer to call ipr_eh_host_reset,
4764 * which will then go to sleep and wait for the reset to complete
4765 */
4766 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4767 return FAILED;
Brian King04d97682006-11-21 10:28:04 -06004768 if (!res || !ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769 return FAILED;
4770
4771 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4772 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4773 ipr_cmd->done = ipr_scsi_eh_done;
4774 op_found = 1;
4775 break;
4776 }
4777 }
4778
4779 if (!op_found)
4780 return SUCCESS;
4781
4782 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004783 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4785 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4786 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4787 ipr_cmd->u.sdev = scsi_cmd->device;
4788
Brian Kingfb3ed3c2006-03-29 09:37:37 -06004789 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4790 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07004792 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793
4794 /*
4795 * If the abort task timed out and we sent a bus reset, we will get
4796 * one of the following responses to the abort
4797 */
4798 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4799 ioasc = 0;
4800 ipr_trace;
4801 }
4802
4803 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06004804 if (!ipr_is_naca_model(res))
4805 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806
4807 LEAVE;
4808 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4809}
4810
4811/**
4812 * ipr_eh_abort - Abort a single op
4813 * @scsi_cmd: scsi command struct
4814 *
4815 * Return value:
4816 * SUCCESS / FAILED
4817 **/
4818static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4819{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004820 unsigned long flags;
4821 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822
4823 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004825 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4826 rc = ipr_cancel_op(scsi_cmd);
4827 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828
4829 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04004830 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831}
4832
4833/**
4834 * ipr_handle_other_interrupt - Handle "other" interrupts
4835 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836 *
4837 * Return value:
4838 * IRQ_NONE / IRQ_HANDLED
4839 **/
Wayne Boyer64ffdb72010-05-19 11:56:13 -07004840static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841{
4842 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer64ffdb72010-05-19 11:56:13 -07004843 volatile u32 int_reg, int_mask_reg;
4844
4845 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4847
4848 /* If an interrupt on the adapter did not occur, ignore it.
4849 * Or in the case of SIS 64, check for a stage change interrupt.
4850 */
4851 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4852 if (ioa_cfg->sis64) {
4853 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4854 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4855 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4856
4857 /* clear stage change */
4858 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4859 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4860 list_del(&ioa_cfg->reset_cmd->queue);
4861 del_timer(&ioa_cfg->reset_cmd->timer);
4862 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4863 return IRQ_HANDLED;
4864 }
4865 }
4866
4867 return IRQ_NONE;
4868 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869
4870 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4871 /* Mask the interrupt */
4872 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4873
4874 /* Clear the interrupt */
4875 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4876 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4877
4878 list_del(&ioa_cfg->reset_cmd->queue);
4879 del_timer(&ioa_cfg->reset_cmd->timer);
4880 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4881 } else {
4882 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4883 ioa_cfg->ioa_unit_checked = 1;
4884 else
4885 dev_err(&ioa_cfg->pdev->dev,
4886 "Permanent IOA failure. 0x%08X\n", int_reg);
4887
4888 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4889 ioa_cfg->sdt_state = GET_DUMP;
4890
4891 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4892 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4893 }
4894
4895 return rc;
4896}
4897
4898/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004899 * ipr_isr_eh - Interrupt service routine error handler
4900 * @ioa_cfg: ioa config struct
4901 * @msg: message to log
4902 *
4903 * Return value:
4904 * none
4905 **/
4906static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4907{
4908 ioa_cfg->errors_logged++;
4909 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4910
4911 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4912 ioa_cfg->sdt_state = GET_DUMP;
4913
4914 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4915}
4916
4917/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918 * ipr_isr - Interrupt service routine
4919 * @irq: irq number
4920 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 *
4922 * Return value:
4923 * IRQ_NONE / IRQ_HANDLED
4924 **/
David Howells7d12e782006-10-05 14:55:46 +01004925static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926{
4927 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4928 unsigned long lock_flags = 0;
Wayne Boyer64ffdb72010-05-19 11:56:13 -07004929 volatile u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 u32 ioasc;
4931 u16 cmd_index;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004932 int num_hrrq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 struct ipr_cmnd *ipr_cmd;
4934 irqreturn_t rc = IRQ_NONE;
4935
4936 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4937
4938 /* If interrupts are disabled, ignore the interrupt */
4939 if (!ioa_cfg->allow_interrupts) {
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941 return IRQ_NONE;
4942 }
4943
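 /* Walk the host RRQ, completing each response whose toggle bit matches
  * the driver's current toggle. The toggle is flipped each time the
  * queue wraps, so a stale entry terminates the inner loop.
  */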
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944 while (1) {
4945 ipr_cmd = NULL;
4946
4947 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4948 ioa_cfg->toggle_bit) {
4949
4950 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4951 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4952
4953 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004954 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956 return IRQ_HANDLED;
4957 }
4958
4959 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4960
Wayne Boyer96d21f02010-05-10 09:13:27 -07004961 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962
4963 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4964
4965 list_del(&ipr_cmd->queue);
4966 del_timer(&ipr_cmd->timer);
4967 ipr_cmd->done(ipr_cmd);
4968
4969 rc = IRQ_HANDLED;
4970
4971 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4972 ioa_cfg->hrrq_curr++;
4973 } else {
4974 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4975 ioa_cfg->toggle_bit ^= 1u;
4976 }
4977 }
4978
4979 if (ipr_cmd != NULL) {
4980 /* Clear the PCI interrupt */
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004981 do {
Wayne Boyer214777b2010-02-19 13:24:26 -08004982 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer64ffdb72010-05-19 11:56:13 -07004983 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07004984 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4985 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4986
4987 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4988 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4989 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4990 return IRQ_HANDLED;
4991 }
4992
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 } else
4994 break;
4995 }
4996
4997 if (unlikely(rc == IRQ_NONE))
Wayne Boyer64ffdb72010-05-19 11:56:13 -07004998 rc = ipr_handle_other_interrupt(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999
5000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5001 return rc;
5002}
5003
5004/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005005 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 * @ioa_cfg: ioa config struct
5007 * @ipr_cmd: ipr command struct
5008 *
5009 * Return value:
5010 * 0 on success / -1 on failure
5011 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005012static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5013 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005015 int i, nseg;
5016 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017 u32 length;
5018 u32 ioadl_flags = 0;
5019 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5020 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005021 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005023 length = scsi_bufflen(scsi_cmd);
5024 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 return 0;
5026
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005027 nseg = scsi_dma_map(scsi_cmd);
5028 if (nseg < 0) {
5029 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5030 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005031 }
5032
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005033 ipr_cmd->dma_use_sg = nseg;
5034
Wayne Boyer438b0332010-05-10 09:13:00 -07005035 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005036 ioarcb->ioadl_len =
5037 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005038
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005039 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5040 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5041 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005042 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5043 ioadl_flags = IPR_IOADL_FLAGS_READ;
5044
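 /* Translate each scatter/gather element into a 64-bit IOADL descriptor;
  * the final descriptor is flagged with IPR_IOADL_FLAGS_LAST after the
  * loop.
  */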
5045 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5046 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5047 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5048 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5049 }
5050
5051 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5052 return 0;
5053}
5054
5055/**
5056 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5057 * @ioa_cfg: ioa config struct
5058 * @ipr_cmd: ipr command struct
5059 *
5060 * Return value:
5061 * 0 on success / -1 on failure
5062 **/
5063static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5064 struct ipr_cmnd *ipr_cmd)
5065{
5066 int i, nseg;
5067 struct scatterlist *sg;
5068 u32 length;
5069 u32 ioadl_flags = 0;
5070 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5071 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5072 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5073
5074 length = scsi_bufflen(scsi_cmd);
5075 if (!length)
5076 return 0;
5077
5078 nseg = scsi_dma_map(scsi_cmd);
5079 if (nseg < 0) {
5080 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5081 return -1;
5082 }
5083
5084 ipr_cmd->dma_use_sg = nseg;
5085
5086 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5087 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5088 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5089 ioarcb->data_transfer_length = cpu_to_be32(length);
5090 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005091 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5092 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5093 ioadl_flags = IPR_IOADL_FLAGS_READ;
5094 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5095 ioarcb->read_ioadl_len =
5096 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5097 }
5098
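 /* If the S/G list is short enough, place the IOADL in the spare
  * add_data area of the IOARCB itself instead of the external
  * descriptor buffer.
  */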
Wayne Boyera32c0552010-02-19 13:23:36 -08005099 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5100 ioadl = ioarcb->u.add_data.u.ioadl;
5101 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5102 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005103 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5104 }
5105
5106 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5107 ioadl[i].flags_and_data_len =
5108 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5109 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5110 }
5111
5112 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5113 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114}
5115
5116/**
5117 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5118 * @scsi_cmd: scsi command struct
5119 *
5120 * Return value:
5121 * task attributes
5122 **/
5123static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5124{
5125 u8 tag[2];
5126 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5127
5128 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5129 switch (tag[0]) {
5130 case MSG_SIMPLE_TAG:
5131 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5132 break;
5133 case MSG_HEAD_TAG:
5134 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5135 break;
5136 case MSG_ORDERED_TAG:
5137 rc = IPR_FLAGS_LO_ORDERED_TASK;
5138 break;
5139 };
5140 }
5141
5142 return rc;
5143}
5144
5145/**
5146 * ipr_erp_done - Process completion of ERP for a device
5147 * @ipr_cmd: ipr command struct
5148 *
5149 * This function copies the sense buffer into the scsi_cmd
5150 * struct and calls the scsi_done function.
5151 *
5152 * Return value:
5153 * nothing
5154 **/
5155static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5156{
5157 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5158 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5159 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005160 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
5162 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5163 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005164 scmd_printk(KERN_ERR, scsi_cmd,
5165 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166 } else {
5167 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5168 SCSI_SENSE_BUFFERSIZE);
5169 }
5170
5171 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005172 if (!ipr_is_naca_model(res))
5173 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 res->in_erp = 0;
5175 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005176 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5178 scsi_cmd->scsi_done(scsi_cmd);
5179}
5180
5181/**
5182 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5183 * @ipr_cmd: ipr command struct
5184 *
5185 * Return value:
5186 * none
5187 **/
5188static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5189{
Brian King51b1c7e2007-03-29 12:43:50 -05005190 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005191 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08005192 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193
5194 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08005195 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005197 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005199 ioasa->hdr.ioasc = 0;
5200 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08005201
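 /* Point the IOARCB back at the command block's own IOADL storage; the
  * field and offset differ between the 64-bit and legacy IOARCB formats.
  */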
5202 if (ipr_cmd->ioa_cfg->sis64)
5203 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5204 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5205 else {
5206 ioarcb->write_ioadl_addr =
5207 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5208 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210}
5211
5212/**
5213 * ipr_erp_request_sense - Send request sense to a device
5214 * @ipr_cmd: ipr command struct
5215 *
5216 * This function sends a request sense to a device as a result
5217 * of a check condition.
5218 *
5219 * Return value:
5220 * nothing
5221 **/
5222static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5223{
5224 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005225 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226
5227 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5228 ipr_erp_done(ipr_cmd);
5229 return;
5230 }
5231
5232 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5233
5234 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5235 cmd_pkt->cdb[0] = REQUEST_SENSE;
5236 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5237 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5238 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5239 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5240
Wayne Boyera32c0552010-02-19 13:23:36 -08005241 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5242 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243
5244 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5245 IPR_REQUEST_SENSE_TIMEOUT * 2);
5246}
5247
5248/**
5249 * ipr_erp_cancel_all - Send cancel all to a device
5250 * @ipr_cmd: ipr command struct
5251 *
5252 * This function sends a cancel all to a device to clear the
5253 * queue. If we are running TCQ on the device, QERR is set to 1,
5254 * which means all outstanding ops have been dropped on the floor.
5255 * Cancel all will return them to us.
5256 *
5257 * Return value:
5258 * nothing
5259 **/
5260static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5261{
5262 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5263 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5264 struct ipr_cmd_pkt *cmd_pkt;
5265
5266 res->in_erp = 1;
5267
5268 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5269
5270 if (!scsi_get_tag_type(scsi_cmd->device)) {
5271 ipr_erp_request_sense(ipr_cmd);
5272 return;
5273 }
5274
5275 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5276 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5277 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5278
5279 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5280 IPR_CANCEL_ALL_TIMEOUT);
5281}
5282
5283/**
5284 * ipr_dump_ioasa - Dump contents of IOASA
5285 * @ioa_cfg: ioa config struct
5286 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06005287 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 *
5289 * This function is invoked by the interrupt handler when ops
5290 * fail. It will log the IOASA if appropriate. Only called
5291 * for GPDD ops.
5292 *
5293 * Return value:
5294 * none
5295 **/
5296static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06005297 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298{
5299 int i;
5300 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05005301 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005302 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005303 __be32 *ioasa_data = (__be32 *)ioasa;
5304 int error_index;
5305
Wayne Boyer96d21f02010-05-10 09:13:27 -07005306 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5307 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308
5309 if (0 == ioasc)
5310 return;
5311
5312 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5313 return;
5314
Brian Kingb0692dd2007-03-29 12:43:09 -05005315 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5316 error_index = ipr_get_error(fd_ioasc);
5317 else
5318 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319
5320 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5321 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07005322 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323 return;
5324
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005325 if (!ipr_is_gscsi(res))
5326 return;
5327
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 if (ipr_error_table[error_index].log_ioasa == 0)
5329 return;
5330 }
5331
Brian Kingfe964d02006-03-29 09:37:29 -06005332 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333
Wayne Boyer96d21f02010-05-10 09:13:27 -07005334 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5335 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5336 data_len = sizeof(struct ipr_ioasa64);
5337 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339
5340 ipr_err("IOASA Dump:\n");
5341
5342 for (i = 0; i < data_len / 4; i += 4) {
5343 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5344 be32_to_cpu(ioasa_data[i]),
5345 be32_to_cpu(ioasa_data[i+1]),
5346 be32_to_cpu(ioasa_data[i+2]),
5347 be32_to_cpu(ioasa_data[i+3]));
5348 }
5349}
5350
5351/**
5352 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5353 * @ipr_cmd: ipr command struct
5355 *
5356 * Return value:
5357 * none
5358 **/
5359static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5360{
5361 u32 failing_lba;
5362 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5363 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005364 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5365 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366
5367 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5368
5369 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5370 return;
5371
5372 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5373
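 /* A volume set media error with a failing LBA above 32 bits needs
  * descriptor format sense (response code 0x72) so the full 64-bit LBA
  * fits in an information descriptor; all other cases use fixed format
  * sense (response code 0x70).
  */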
5374 if (ipr_is_vset_device(res) &&
5375 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5376 ioasa->u.vset.failing_lba_hi != 0) {
5377 sense_buf[0] = 0x72;
5378 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5379 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5380 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5381
5382 sense_buf[7] = 12;
5383 sense_buf[8] = 0;
5384 sense_buf[9] = 0x0A;
5385 sense_buf[10] = 0x80;
5386
5387 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5388
5389 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5390 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5391 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5392 sense_buf[15] = failing_lba & 0x000000ff;
5393
5394 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5395
5396 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5397 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5398 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5399 sense_buf[19] = failing_lba & 0x000000ff;
5400 } else {
5401 sense_buf[0] = 0x70;
5402 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5403 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5404 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5405
5406 /* Illegal request */
5407 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07005408 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 sense_buf[7] = 10; /* additional length */
5410
5411 /* IOARCB was in error */
5412 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5413 sense_buf[15] = 0xC0;
5414 else /* Parameter data was invalid */
5415 sense_buf[15] = 0x80;
5416
5417 sense_buf[16] =
5418 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005419 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420 sense_buf[17] =
5421 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07005422 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 } else {
5424 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5425 if (ipr_is_vset_device(res))
5426 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5427 else
5428 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5429
5430 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5431 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5432 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5433 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5434 sense_buf[6] = failing_lba & 0x000000ff;
5435 }
5436
5437 sense_buf[7] = 6; /* additional length */
5438 }
5439 }
5440}
5441
5442/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005443 * ipr_get_autosense - Copy autosense data to sense buffer
5444 * @ipr_cmd: ipr command struct
5445 *
5446 * This function copies the autosense buffer to the buffer
5447 * in the scsi_cmd, if there is autosense available.
5448 *
5449 * Return value:
5450 * 1 if autosense was available / 0 if not
5451 **/
5452static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5453{
Wayne Boyer96d21f02010-05-10 09:13:27 -07005454 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5455 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005456
Wayne Boyer96d21f02010-05-10 09:13:27 -07005457 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005458 return 0;
5459
Wayne Boyer96d21f02010-05-10 09:13:27 -07005460 if (ipr_cmd->ioa_cfg->sis64)
5461 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5462 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5463 SCSI_SENSE_BUFFERSIZE));
5464 else
5465 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5466 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5467 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005468 return 1;
5469}
5470
5471/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472 * ipr_erp_start - Process an error response for a SCSI op
5473 * @ioa_cfg: ioa config struct
5474 * @ipr_cmd: ipr command struct
5475 *
5476 * This function determines whether or not to initiate ERP
5477 * on the affected device.
5478 *
5479 * Return value:
5480 * nothing
5481 **/
5482static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5483 struct ipr_cmnd *ipr_cmd)
5484{
5485 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5486 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005487 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05005488 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489
5490 if (!res) {
5491 ipr_scsi_eh_done(ipr_cmd);
5492 return;
5493 }
5494
Brian King8a048992007-04-26 16:00:10 -05005495 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496 ipr_gen_sense(ipr_cmd);
5497
Brian Kingcc9bd5d2007-03-29 12:43:01 -05005498 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5499
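 /* Map the masked IOASC to a midlayer disposition: some codes are
  * retried or reported as transport errors, while check conditions fall
  * through to autosense or a follow-up request sense.
  */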
Brian King8a048992007-04-26 16:00:10 -05005500 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005501 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005502 if (ipr_is_naca_model(res))
5503 scsi_cmd->result |= (DID_ABORT << 16);
5504 else
5505 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 break;
5507 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005508 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005509 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5510 break;
5511 case IPR_IOASC_HW_SEL_TIMEOUT:
5512 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005513 if (!ipr_is_naca_model(res))
5514 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005515 break;
5516 case IPR_IOASC_SYNC_REQUIRED:
5517 if (!res->in_erp)
5518 res->needs_sync_complete = 1;
5519 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5520 break;
5521 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06005522 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5524 break;
5525 case IPR_IOASC_BUS_WAS_RESET:
5526 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5527 /*
5528 * Report the bus reset and ask for a retry. The device
5529 * will give CC/UA the next command.
5530 */
5531 if (!res->resetting_device)
5532 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5533 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005534 if (!ipr_is_naca_model(res))
5535 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536 break;
5537 case IPR_IOASC_HW_DEV_BUS_STATUS:
5538 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5539 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005540 if (!ipr_get_autosense(ipr_cmd)) {
5541 if (!ipr_is_naca_model(res)) {
5542 ipr_erp_cancel_all(ipr_cmd);
5543 return;
5544 }
5545 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005547 if (!ipr_is_naca_model(res))
5548 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 break;
5550 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5551 break;
5552 default:
Brian King5b7304f2006-08-02 14:57:51 -05005553 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5554 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005555 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005556 res->needs_sync_complete = 1;
5557 break;
5558 }
5559
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005560 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5562 scsi_cmd->scsi_done(scsi_cmd);
5563}
5564
5565/**
5566 * ipr_scsi_done - mid-layer done function
5567 * @ipr_cmd: ipr command struct
5568 *
5569 * This function is invoked by the interrupt handler for
5570 * ops generated by the SCSI mid-layer
5571 *
5572 * Return value:
5573 * none
5574 **/
5575static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5576{
5577 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5578 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005579 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580
Wayne Boyer96d21f02010-05-10 09:13:27 -07005581 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582
5583 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005584 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5586 scsi_cmd->scsi_done(scsi_cmd);
5587 } else
5588 ipr_erp_start(ioa_cfg, ipr_cmd);
5589}
5590
5591/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592 * ipr_queuecommand - Queue a mid-layer request
5593 * @scsi_cmd: scsi command struct
5594 * @done: done function
5595 *
5596 * This function queues a request generated by the mid-layer.
5597 *
5598 * Return value:
5599 * 0 on success
5600 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5601 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5602 **/
5603static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5604 void (*done) (struct scsi_cmnd *))
5605{
5606 struct ipr_ioa_cfg *ioa_cfg;
5607 struct ipr_resource_entry *res;
5608 struct ipr_ioarcb *ioarcb;
5609 struct ipr_cmnd *ipr_cmd;
5610 int rc = 0;
5611
5612 scsi_cmd->scsi_done = done;
5613 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5614 res = scsi_cmd->device->hostdata;
5615 scsi_cmd->result = (DID_OK << 16);
5616
5617 /*
5618 * We are currently blocking all devices due to a host reset
5619 * We have told the host to stop giving us new requests, but
5620 * ERP ops don't count. FIXME
5621 */
5622 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5623 return SCSI_MLQUEUE_HOST_BUSY;
5624
5625 /*
5626 * FIXME - Create scsi_set_host_offline interface
5627 * and the ioa_is_dead check can be removed
5628 */
5629 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5630 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5631 scsi_cmd->result = (DID_NO_CONNECT << 16);
5632 scsi_cmd->scsi_done(scsi_cmd);
5633 return 0;
5634 }
5635
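 /* Commands for SATA devices are handed to libata via
  * ata_sas_queuecmd() rather than being built into an IOARCB here.
  */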
Brian King35a39692006-09-25 12:39:20 -05005636 if (ipr_is_gata(res) && res->sata_port)
5637 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5638
Linus Torvalds1da177e2005-04-16 15:20:36 -07005639 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5640 ioarcb = &ipr_cmd->ioarcb;
5641 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5642
5643 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5644 ipr_cmd->scsi_cmd = scsi_cmd;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005645 ioarcb->res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646 ipr_cmd->done = ipr_scsi_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005647 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648
5649 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5650 if (scsi_cmd->underflow == 0)
5651 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5652
5653 if (res->needs_sync_complete) {
5654 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5655 res->needs_sync_complete = 0;
5656 }
5657
5658 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5659 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5660 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5661 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5662 }
5663
5664 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5665 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5666 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5667
Wayne Boyera32c0552010-02-19 13:23:36 -08005668 if (likely(rc == 0)) {
5669 if (ioa_cfg->sis64)
5670 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5671 else
5672 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5673 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674
5675 if (likely(rc == 0)) {
5676 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08005677 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 } else {
5679 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5680 return SCSI_MLQUEUE_HOST_BUSY;
5681 }
5682
5683 return 0;
5684}
5685
5686/**
Brian King35a39692006-09-25 12:39:20 -05005687 * ipr_ioctl - IOCTL handler
5688 * @sdev: scsi device struct
5689 * @cmd: IOCTL cmd
5690 * @arg: IOCTL arg
5691 *
5692 * Return value:
5693 * 0 on success / other on failure
5694 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06005695static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05005696{
5697 struct ipr_resource_entry *res;
5698
5699 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05005700 if (res && ipr_is_gata(res)) {
5701 if (cmd == HDIO_GET_IDENTITY)
5702 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05005703 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05005704 }
Brian King35a39692006-09-25 12:39:20 -05005705
5706 return -EINVAL;
5707}
5708
5709/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710 * ipr_ioa_info - Get information about the card/driver
5711 * @host: scsi host struct
5712 *
5713 * Return value:
5714 * pointer to buffer with description string
5715 **/
5716static const char * ipr_ioa_info(struct Scsi_Host *host)
5717{
5718 static char buffer[512];
5719 struct ipr_ioa_cfg *ioa_cfg;
5720 unsigned long lock_flags = 0;
5721
5722 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5723
5724 spin_lock_irqsave(host->host_lock, lock_flags);
5725 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5726 spin_unlock_irqrestore(host->host_lock, lock_flags);
5727
5728 return buffer;
5729}
5730
5731static struct scsi_host_template driver_template = {
5732 .module = THIS_MODULE,
5733 .name = "IPR",
5734 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05005735 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005736 .queuecommand = ipr_queuecommand,
5737 .eh_abort_handler = ipr_eh_abort,
5738 .eh_device_reset_handler = ipr_eh_dev_reset,
5739 .eh_host_reset_handler = ipr_eh_host_reset,
5740 .slave_alloc = ipr_slave_alloc,
5741 .slave_configure = ipr_slave_configure,
5742 .slave_destroy = ipr_slave_destroy,
Brian King35a39692006-09-25 12:39:20 -05005743 .target_alloc = ipr_target_alloc,
5744 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745 .change_queue_depth = ipr_change_queue_depth,
5746 .change_queue_type = ipr_change_queue_type,
5747 .bios_param = ipr_biosparam,
5748 .can_queue = IPR_MAX_COMMANDS,
5749 .this_id = -1,
5750 .sg_tablesize = IPR_MAX_SGLIST,
5751 .max_sectors = IPR_IOA_MAX_SECTORS,
5752 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5753 .use_clustering = ENABLE_CLUSTERING,
5754 .shost_attrs = ipr_ioa_attrs,
5755 .sdev_attrs = ipr_dev_attrs,
5756 .proc_name = IPR_NAME
5757};
5758
Brian King35a39692006-09-25 12:39:20 -05005759/**
5760 * ipr_ata_phy_reset - libata phy_reset handler
5761 * @ap: ata port to reset
5762 *
5763 **/
5764static void ipr_ata_phy_reset(struct ata_port *ap)
5765{
5766 unsigned long flags;
5767 struct ipr_sata_port *sata_port = ap->private_data;
5768 struct ipr_resource_entry *res = sata_port->res;
5769 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5770 int rc;
5771
5772 ENTER;
5773 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 5774	while (ioa_cfg->in_reset_reload) {
5775 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5776 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5778 }
5779
5780 if (!ioa_cfg->allow_cmds)
5781 goto out_unlock;
5782
5783 rc = ipr_device_reset(ioa_cfg, res);
5784
5785 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02005786 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05005787 goto out_unlock;
5788 }
5789
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005790 ap->link.device[0].class = res->ata_class;
5791 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02005792 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05005793
5794out_unlock:
5795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5796 LEAVE;
5797}
5798
5799/**
5800 * ipr_ata_post_internal - Cleanup after an internal command
5801 * @qc: ATA queued command
5802 *
5803 * Return value:
5804 * none
5805 **/
5806static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5807{
5808 struct ipr_sata_port *sata_port = qc->ap->private_data;
5809 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5810 struct ipr_cmnd *ipr_cmd;
5811 unsigned long flags;
5812
5813 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King73d98ff2006-11-21 10:27:58 -06005814	while (ioa_cfg->in_reset_reload) {
5815 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5816 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5817 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5818 }
5819
Brian King35a39692006-09-25 12:39:20 -05005820 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5821 if (ipr_cmd->qc == qc) {
5822 ipr_device_reset(ioa_cfg, sata_port->res);
5823 break;
5824 }
5825 }
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5827}
5828
5829/**
Brian King35a39692006-09-25 12:39:20 -05005830 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5831 * @regs: destination
5832 * @tf: source ATA taskfile
5833 *
5834 * Return value:
5835 * none
5836 **/
5837static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5838 struct ata_taskfile *tf)
5839{
5840 regs->feature = tf->feature;
5841 regs->nsect = tf->nsect;
5842 regs->lbal = tf->lbal;
5843 regs->lbam = tf->lbam;
5844 regs->lbah = tf->lbah;
5845 regs->device = tf->device;
5846 regs->command = tf->command;
5847 regs->hob_feature = tf->hob_feature;
5848 regs->hob_nsect = tf->hob_nsect;
5849 regs->hob_lbal = tf->hob_lbal;
5850 regs->hob_lbam = tf->hob_lbam;
5851 regs->hob_lbah = tf->hob_lbah;
5852 regs->ctl = tf->ctl;
5853}
5854
5855/**
5856 * ipr_sata_done - done function for SATA commands
5857 * @ipr_cmd: ipr command struct
5858 *
5859 * This function is invoked by the interrupt handler for
5860 * ops generated by the SCSI mid-layer to SATA devices
5861 *
5862 * Return value:
5863 * none
5864 **/
5865static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5866{
5867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5868 struct ata_queued_cmd *qc = ipr_cmd->qc;
5869 struct ipr_sata_port *sata_port = qc->ap->private_data;
5870 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07005871 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05005872
Wayne Boyer96d21f02010-05-10 09:13:27 -07005873 if (ipr_cmd->ioa_cfg->sis64)
5874 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5875 sizeof(struct ipr_ioasa_gata));
5876 else
5877 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5878 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05005879 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5880
Wayne Boyer96d21f02010-05-10 09:13:27 -07005881 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005882 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05005883
5884 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07005885 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05005886 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07005887 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05005888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5889 ata_qc_complete(qc);
5890}
5891
5892/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005893 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5894 * @ipr_cmd: ipr command struct
5895 * @qc: ATA queued command
5896 *
5897 **/
5898static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5899 struct ata_queued_cmd *qc)
5900{
5901 u32 ioadl_flags = 0;
5902 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5903 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5904 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5905 int len = qc->nbytes;
5906 struct scatterlist *sg;
5907 unsigned int si;
5908 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5909
5910 if (len == 0)
5911 return;
5912
5913 if (qc->dma_dir == DMA_TO_DEVICE) {
5914 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5915 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5916 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5917 ioadl_flags = IPR_IOADL_FLAGS_READ;
5918
5919 ioarcb->data_transfer_length = cpu_to_be32(len);
5920 ioarcb->ioadl_len =
5921 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5922 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5923 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5924
5925 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5926 ioadl64->flags = cpu_to_be32(ioadl_flags);
5927 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5928 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5929
5930 last_ioadl64 = ioadl64;
5931 ioadl64++;
5932 }
5933
5934 if (likely(last_ioadl64))
5935 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5936}
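/*
 * Note on the two IOADL formats: the SIS-64 descriptor built above carries
 * the flags, data length, and a 64-bit address in separate fields, while
 * the legacy descriptor built by ipr_build_ata_ioadl() below packs the
 * flags and data length into a single 32-bit word alongside a 32-bit
 * address. In both formats the final descriptor is tagged with
 * IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends.
 */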
5937
5938/**
Brian King35a39692006-09-25 12:39:20 -05005939 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5940 * @ipr_cmd: ipr command struct
5941 * @qc: ATA queued command
5942 *
5943 **/
5944static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5945 struct ata_queued_cmd *qc)
5946{
5947 u32 ioadl_flags = 0;
5948 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005949 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005950 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01005951 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05005952 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09005953 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05005954
5955 if (len == 0)
5956 return;
5957
5958 if (qc->dma_dir == DMA_TO_DEVICE) {
5959 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5960 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005961 ioarcb->data_transfer_length = cpu_to_be32(len);
5962 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05005963 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5964 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_READ;
5966 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5967 ioarcb->read_ioadl_len =
5968 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5969 }
5970
Tejun Heoff2aeb12007-12-05 16:43:11 +09005971 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05005972 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5973 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005974
5975 last_ioadl = ioadl;
5976 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05005977 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04005978
5979 if (likely(last_ioadl))
5980 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05005981}
5982
5983/**
5984 * ipr_qc_issue - Issue a SATA qc to a device
5985 * @qc: queued command
5986 *
5987 * Return value:
5988 * 0 if success
5989 **/
5990static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5991{
5992 struct ata_port *ap = qc->ap;
5993 struct ipr_sata_port *sata_port = ap->private_data;
5994 struct ipr_resource_entry *res = sata_port->res;
5995 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5996 struct ipr_cmnd *ipr_cmd;
5997 struct ipr_ioarcb *ioarcb;
5998 struct ipr_ioarcb_ata_regs *regs;
5999
6000 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
Brian King0feeed82007-03-29 12:43:43 -05006001 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05006002
6003 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6004 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05006005
Wayne Boyera32c0552010-02-19 13:23:36 -08006006 if (ioa_cfg->sis64) {
6007 regs = &ipr_cmd->i.ata_ioadl.regs;
6008 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6009 } else
6010 regs = &ioarcb->u.add_data.u.regs;
6011
6012 memset(regs, 0, sizeof(*regs));
6013 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05006014
6015 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6016 ipr_cmd->qc = qc;
6017 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006018 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05006019 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6020 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6021 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01006022 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05006023
Wayne Boyera32c0552010-02-19 13:23:36 -08006024 if (ioa_cfg->sis64)
6025 ipr_build_ata_ioadl64(ipr_cmd, qc);
6026 else
6027 ipr_build_ata_ioadl(ipr_cmd, qc);
6028
Brian King35a39692006-09-25 12:39:20 -05006029 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6030 ipr_copy_sata_tf(regs, &qc->tf);
6031 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006032 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05006033
6034 switch (qc->tf.protocol) {
6035 case ATA_PROT_NODATA:
6036 case ATA_PROT_PIO:
6037 break;
6038
6039 case ATA_PROT_DMA:
6040 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6041 break;
6042
Tejun Heo0dc36882007-12-18 16:34:43 -05006043 case ATAPI_PROT_PIO:
6044 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05006045 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6046 break;
6047
Tejun Heo0dc36882007-12-18 16:34:43 -05006048 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05006049 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6050 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6051 break;
6052
6053 default:
6054 WARN_ON(1);
Brian King0feeed82007-03-29 12:43:43 -05006055 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05006056 }
6057
6058 mb();
Wayne Boyera32c0552010-02-19 13:23:36 -08006059
6060 ipr_send_command(ipr_cmd);
6061
Brian King35a39692006-09-25 12:39:20 -05006062 return 0;
6063}
6064
6065/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006066 * ipr_qc_fill_rtf - Read result TF
6067 * @qc: ATA queued command
6068 *
6069 * Return value:
6070 * true
6071 **/
6072static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6073{
6074 struct ipr_sata_port *sata_port = qc->ap->private_data;
6075 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6076 struct ata_taskfile *tf = &qc->result_tf;
6077
6078 tf->feature = g->error;
6079 tf->nsect = g->nsect;
6080 tf->lbal = g->lbal;
6081 tf->lbam = g->lbam;
6082 tf->lbah = g->lbah;
6083 tf->device = g->device;
6084 tf->command = g->status;
6085 tf->hob_nsect = g->hob_nsect;
6086 tf->hob_lbal = g->hob_lbal;
6087 tf->hob_lbam = g->hob_lbam;
6088 tf->hob_lbah = g->hob_lbah;
6089 tf->ctl = g->alt_status;
6090
6091 return true;
6092}
6093
Brian King35a39692006-09-25 12:39:20 -05006094static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05006095 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09006096 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05006097 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05006098 .qc_prep = ata_noop_qc_prep,
6099 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09006100 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05006101 .port_start = ata_sas_port_start,
6102 .port_stop = ata_sas_port_stop
6103};
6104
6105static struct ata_port_info sata_port_info = {
6106 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6107 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6108 .pio_mask = 0x10, /* pio4 */
6109 .mwdma_mask = 0x07,
6110 .udma_mask = 0x7f, /* udma0-6 */
6111 .port_ops = &ipr_sata_ops
6112};
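/*
 * The transfer mode masks above are libata mode bitmaps: bit N of
 * pio_mask/mwdma_mask/udma_mask enables mode N. So 0x10 advertises
 * PIO mode 4 only, 0x07 advertises MWDMA modes 0-2, and 0x7f
 * advertises UDMA modes 0-6.
 */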
6113
Linus Torvalds1da177e2005-04-16 15:20:36 -07006114#ifdef CONFIG_PPC_PSERIES
6115static const u16 ipr_blocked_processors[] = {
6116 PV_NORTHSTAR,
6117 PV_PULSAR,
6118 PV_POWER4,
6119 PV_ICESTAR,
6120 PV_SSTAR,
6121 PV_POWER4p,
6122 PV_630,
6123 PV_630p
6124};
6125
6126/**
6127 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6128 * @ioa_cfg: ioa cfg struct
6129 *
6130 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6131 * certain pSeries hardware. This function determines if the given
 6132 * adapter is in one of these configurations or not.
6133 *
6134 * Return value:
6135 * 1 if adapter is not supported / 0 if adapter is supported
6136 **/
6137static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6138{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006139 int i;
6140
Auke Kok44c10132007-06-08 15:46:36 -07006141 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
 6142		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6143 if (__is_processor(ipr_blocked_processors[i]))
6144 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145 }
6146 }
6147 return 0;
6148}
6149#else
6150#define ipr_invalid_adapter(ioa_cfg) 0
6151#endif
6152
6153/**
6154 * ipr_ioa_bringdown_done - IOA bring down completion.
6155 * @ipr_cmd: ipr command struct
6156 *
6157 * This function processes the completion of an adapter bring down.
6158 * It wakes any reset sleepers.
6159 *
6160 * Return value:
6161 * IPR_RC_JOB_RETURN
6162 **/
6163static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6164{
6165 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6166
6167 ENTER;
6168 ioa_cfg->in_reset_reload = 0;
6169 ioa_cfg->reset_retries = 0;
6170 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6171 wake_up_all(&ioa_cfg->reset_wait_q);
6172
6173 spin_unlock_irq(ioa_cfg->host->host_lock);
6174 scsi_unblock_requests(ioa_cfg->host);
6175 spin_lock_irq(ioa_cfg->host->host_lock);
6176 LEAVE;
6177
6178 return IPR_RC_JOB_RETURN;
6179}
6180
6181/**
6182 * ipr_ioa_reset_done - IOA reset completion.
6183 * @ipr_cmd: ipr command struct
6184 *
6185 * This function processes the completion of an adapter reset.
6186 * It schedules any necessary mid-layer add/removes and
6187 * wakes any reset sleepers.
6188 *
6189 * Return value:
6190 * IPR_RC_JOB_RETURN
6191 **/
6192static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6193{
6194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6195 struct ipr_resource_entry *res;
6196 struct ipr_hostrcb *hostrcb, *temp;
6197 int i = 0;
6198
6199 ENTER;
6200 ioa_cfg->in_reset_reload = 0;
6201 ioa_cfg->allow_cmds = 1;
6202 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06006203 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204
6205 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6206 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6207 ipr_trace;
6208 break;
6209 }
6210 }
6211 schedule_work(&ioa_cfg->work_q);
6212
6213 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6214 list_del(&hostrcb->queue);
6215 if (i++ < IPR_NUM_LOG_HCAMS)
6216 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6217 else
6218 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6219 }
6220
Brian King6bb04172007-04-26 16:00:08 -05006221 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006222 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6223
6224 ioa_cfg->reset_retries = 0;
6225 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6226 wake_up_all(&ioa_cfg->reset_wait_q);
6227
Mark Nelson30237852008-12-10 12:23:20 +11006228 spin_unlock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229 scsi_unblock_requests(ioa_cfg->host);
Mark Nelson30237852008-12-10 12:23:20 +11006230 spin_lock(ioa_cfg->host->host_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006231
6232 if (!ioa_cfg->allow_cmds)
6233 scsi_block_requests(ioa_cfg->host);
6234
6235 LEAVE;
6236 return IPR_RC_JOB_RETURN;
6237}
6238
6239/**
6240 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6241 * @supported_dev: supported device struct
6242 * @vpids: vendor product id struct
6243 *
6244 * Return value:
6245 * none
6246 **/
6247static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6248 struct ipr_std_inq_vpids *vpids)
6249{
6250 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6251 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6252 supported_dev->num_records = 1;
6253 supported_dev->data_length =
6254 cpu_to_be16(sizeof(struct ipr_supported_device));
6255 supported_dev->reserved = 0;
6256}
6257
6258/**
6259 * ipr_set_supported_devs - Send Set Supported Devices for a device
6260 * @ipr_cmd: ipr command struct
6261 *
Wayne Boyera32c0552010-02-19 13:23:36 -08006262 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07006263 *
6264 * Return value:
6265 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6266 **/
6267static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6268{
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6270 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6272 struct ipr_resource_entry *res = ipr_cmd->u.res;
6273
6274 ipr_cmd->job_step = ipr_ioa_reset_done;
6275
6276 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06006277 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006278 continue;
6279
6280 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006281 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006282
6283 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6284 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6285 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6286
6287 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006288 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6290 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6291
Wayne Boyera32c0552010-02-19 13:23:36 -08006292 ipr_init_ioadl(ipr_cmd,
6293 ioa_cfg->vpd_cbs_dma +
6294 offsetof(struct ipr_misc_cbs, supp_dev),
6295 sizeof(struct ipr_supported_device),
6296 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297
6298 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6299 IPR_SET_SUP_DEVICE_TIMEOUT);
6300
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006301 if (!ioa_cfg->sis64)
6302 ipr_cmd->job_step = ipr_set_supported_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006303 return IPR_RC_JOB_RETURN;
6304 }
6305
6306 return IPR_RC_JOB_CONTINUE;
6307}
6308
6309/**
6310 * ipr_get_mode_page - Locate specified mode page
6311 * @mode_pages: mode page buffer
6312 * @page_code: page code to find
6313 * @len: minimum required length for mode page
6314 *
6315 * Return value:
6316 * pointer to mode page / NULL on failure
6317 **/
6318static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6319 u32 page_code, u32 len)
6320{
6321 struct ipr_mode_page_hdr *mode_hdr;
6322 u32 page_length;
6323 u32 length;
6324
6325 if (!mode_pages || (mode_pages->hdr.length == 0))
6326 return NULL;
6327
6328 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6329 mode_hdr = (struct ipr_mode_page_hdr *)
6330 (mode_pages->data + mode_pages->hdr.block_desc_len);
6331
6332 while (length) {
6333 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6334 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6335 return mode_hdr;
6336 break;
6337 } else {
6338 page_length = (sizeof(struct ipr_mode_page_hdr) +
6339 mode_hdr->page_length);
6340 length -= page_length;
6341 mode_hdr = (struct ipr_mode_page_hdr *)
6342 ((unsigned long)mode_hdr + page_length);
6343 }
6344 }
6345 return NULL;
6346}
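/*
 * Illustrative usage sketch (hypothetical helper, not part of the driver):
 * callers fetch a page by its code from a previously retrieved IOAFP mode
 * sense buffer and must treat a NULL return as "page not reported or too
 * short".
 */
#if 0
static bool example_has_page28(struct ipr_mode_pages *mode_pages)
{
	/* ask for SCSI bus mode page 0x28 with its minimum required length */
	return ipr_get_mode_page(mode_pages, 0x28,
				 sizeof(struct ipr_mode_page28)) != NULL;
}
#endif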
6347
6348/**
6349 * ipr_check_term_power - Check for term power errors
6350 * @ioa_cfg: ioa config struct
6351 * @mode_pages: IOAFP mode pages buffer
6352 *
6353 * Check the IOAFP's mode page 28 for term power errors
6354 *
6355 * Return value:
6356 * nothing
6357 **/
6358static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6359 struct ipr_mode_pages *mode_pages)
6360{
6361 int i;
6362 int entry_length;
6363 struct ipr_dev_bus_entry *bus;
6364 struct ipr_mode_page28 *mode_page;
6365
6366 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6367 sizeof(struct ipr_mode_page28));
6368
6369 entry_length = mode_page->entry_length;
6370
6371 bus = mode_page->bus;
6372
6373 for (i = 0; i < mode_page->num_entries; i++) {
6374 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6375 dev_err(&ioa_cfg->pdev->dev,
6376 "Term power is absent on scsi bus %d\n",
6377 bus->res_addr.bus);
6378 }
6379
6380 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6381 }
6382}
6383
6384/**
6385 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6386 * @ioa_cfg: ioa config struct
6387 *
6388 * Looks through the config table checking for SES devices. If
 6389 * an SES device appears in the SES table with a maximum SCSI
 6390 * bus speed, the bus speed is limited accordingly.
6391 *
6392 * Return value:
6393 * none
6394 **/
6395static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6396{
6397 u32 max_xfer_rate;
6398 int i;
6399
6400 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6401 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6402 ioa_cfg->bus_attr[i].bus_width);
6403
6404 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6405 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6406 }
6407}
6408
6409/**
6410 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6411 * @ioa_cfg: ioa config struct
6412 * @mode_pages: mode page 28 buffer
6413 *
6414 * Updates mode page 28 based on driver configuration
6415 *
6416 * Return value:
6417 * none
6418 **/
6419static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6420 struct ipr_mode_pages *mode_pages)
6421{
6422 int i, entry_length;
6423 struct ipr_dev_bus_entry *bus;
6424 struct ipr_bus_attributes *bus_attr;
6425 struct ipr_mode_page28 *mode_page;
6426
6427 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6428 sizeof(struct ipr_mode_page28));
6429
6430 entry_length = mode_page->entry_length;
6431
6432 /* Loop for each device bus entry */
6433 for (i = 0, bus = mode_page->bus;
6434 i < mode_page->num_entries;
6435 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6436 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6437 dev_err(&ioa_cfg->pdev->dev,
6438 "Invalid resource address reported: 0x%08X\n",
6439 IPR_GET_PHYS_LOC(bus->res_addr));
6440 continue;
6441 }
6442
6443 bus_attr = &ioa_cfg->bus_attr[i];
6444 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6445 bus->bus_width = bus_attr->bus_width;
6446 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6447 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6448 if (bus_attr->qas_enabled)
6449 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6450 else
6451 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6452 }
6453}
6454
6455/**
6456 * ipr_build_mode_select - Build a mode select command
6457 * @ipr_cmd: ipr command struct
6458 * @res_handle: resource handle to send command to
 6459 * @parm: Byte 1 of the Mode Select command
6460 * @dma_addr: DMA buffer address
6461 * @xfer_len: data transfer length
6462 *
6463 * Return value:
6464 * none
6465 **/
6466static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08006467 __be32 res_handle, u8 parm,
6468 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006470 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6471
6472 ioarcb->res_handle = res_handle;
6473 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6474 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6475 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6476 ioarcb->cmd_pkt.cdb[1] = parm;
6477 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6478
Wayne Boyera32c0552010-02-19 13:23:36 -08006479 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006480}
6481
6482/**
6483 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6484 * @ipr_cmd: ipr command struct
6485 *
6486 * This function sets up the SCSI bus attributes and sends
6487 * a Mode Select for Page 28 to activate them.
6488 *
6489 * Return value:
6490 * IPR_RC_JOB_RETURN
6491 **/
6492static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6493{
6494 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6495 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6496 int length;
6497
6498 ENTER;
Brian King47338042006-02-08 20:57:42 -06006499 ipr_scsi_bus_speed_limit(ioa_cfg);
6500 ipr_check_term_power(ioa_cfg, mode_pages);
6501 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6502 length = mode_pages->hdr.length + 1;
6503 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504
6505 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6506 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6507 length);
6508
Wayne Boyerf72919e2010-02-19 13:24:21 -08006509 ipr_cmd->job_step = ipr_set_supported_devs;
6510 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6511 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6513
6514 LEAVE;
6515 return IPR_RC_JOB_RETURN;
6516}
6517
6518/**
6519 * ipr_build_mode_sense - Builds a mode sense command
6520 * @ipr_cmd: ipr command struct
 6521 * @res_handle: resource handle to send command to
6522 * @parm: Byte 2 of mode sense command
6523 * @dma_addr: DMA address of mode sense buffer
6524 * @xfer_len: Size of DMA buffer
6525 *
6526 * Return value:
6527 * none
6528 **/
6529static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6530 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08006531 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006533 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6534
6535 ioarcb->res_handle = res_handle;
6536 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6537 ioarcb->cmd_pkt.cdb[2] = parm;
6538 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6539 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6540
Wayne Boyera32c0552010-02-19 13:23:36 -08006541 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006542}
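/*
 * The CDB built above follows the standard MODE SENSE(6) layout: byte 0
 * is the opcode, byte 2 carries the page control and page code (the
 * callers below pass 0x28 and 0x24), and byte 4 is the allocation
 * length, i.e. the size of the DMA buffer the IOA may fill.
 */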
6543
6544/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006545 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6546 * @ipr_cmd: ipr command struct
6547 *
6548 * This function handles the failure of an IOA bringup command.
6549 *
6550 * Return value:
6551 * IPR_RC_JOB_RETURN
6552 **/
6553static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6554{
6555 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006556 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006557
6558 dev_err(&ioa_cfg->pdev->dev,
6559 "0x%02X failed with IOASC: 0x%08X\n",
6560 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6561
6562 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6563 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6564 return IPR_RC_JOB_RETURN;
6565}
6566
6567/**
6568 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6569 * @ipr_cmd: ipr command struct
6570 *
6571 * This function handles the failure of a Mode Sense to the IOAFP.
6572 * Some adapters do not handle all mode pages.
6573 *
6574 * Return value:
6575 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6576 **/
6577static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6578{
Wayne Boyerf72919e2010-02-19 13:24:21 -08006579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006580 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006581
6582 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08006583 ipr_cmd->job_step = ipr_set_supported_devs;
6584 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6585 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006586 return IPR_RC_JOB_CONTINUE;
6587 }
6588
6589 return ipr_reset_cmd_failed(ipr_cmd);
6590}
6591
6592/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6594 * @ipr_cmd: ipr command struct
6595 *
 6596 * This function sends a Page 28 mode sense to the IOA to
6597 * retrieve SCSI bus attributes.
6598 *
6599 * Return value:
6600 * IPR_RC_JOB_RETURN
6601 **/
6602static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6603{
6604 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6605
6606 ENTER;
6607 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6608 0x28, ioa_cfg->vpd_cbs_dma +
6609 offsetof(struct ipr_misc_cbs, mode_pages),
6610 sizeof(struct ipr_mode_pages));
6611
6612 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06006613 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614
6615 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6616
6617 LEAVE;
6618 return IPR_RC_JOB_RETURN;
6619}
6620
6621/**
Brian Kingac09c342007-04-26 16:00:16 -05006622 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6623 * @ipr_cmd: ipr command struct
6624 *
6625 * This function enables dual IOA RAID support if possible.
6626 *
6627 * Return value:
6628 * IPR_RC_JOB_RETURN
6629 **/
6630static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6631{
6632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6633 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6634 struct ipr_mode_page24 *mode_page;
6635 int length;
6636
6637 ENTER;
6638 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6639 sizeof(struct ipr_mode_page24));
6640
6641 if (mode_page)
6642 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6643
6644 length = mode_pages->hdr.length + 1;
6645 mode_pages->hdr.length = 0;
6646
6647 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6648 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6649 length);
6650
6651 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6652 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6653
6654 LEAVE;
6655 return IPR_RC_JOB_RETURN;
6656}
6657
6658/**
6659 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6660 * @ipr_cmd: ipr command struct
6661 *
6662 * This function handles the failure of a Mode Sense to the IOAFP.
6663 * Some adapters do not handle all mode pages.
6664 *
6665 * Return value:
6666 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6667 **/
6668static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6669{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006670 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05006671
6672 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6673 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6674 return IPR_RC_JOB_CONTINUE;
6675 }
6676
6677 return ipr_reset_cmd_failed(ipr_cmd);
6678}
6679
6680/**
6681 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6682 * @ipr_cmd: ipr command struct
6683 *
 6684 * This function sends a mode sense to the IOA to retrieve
6685 * the IOA Advanced Function Control mode page.
6686 *
6687 * Return value:
6688 * IPR_RC_JOB_RETURN
6689 **/
6690static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6691{
6692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6693
6694 ENTER;
6695 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6696 0x24, ioa_cfg->vpd_cbs_dma +
6697 offsetof(struct ipr_misc_cbs, mode_pages),
6698 sizeof(struct ipr_mode_pages));
6699
6700 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6701 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6702
6703 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6704
6705 LEAVE;
6706 return IPR_RC_JOB_RETURN;
6707}
6708
6709/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006710 * ipr_init_res_table - Initialize the resource table
6711 * @ipr_cmd: ipr command struct
6712 *
6713 * This function looks through the existing resource table, comparing
6714 * it with the config table. This function will take care of old/new
6715 * devices and schedule adding/removing them from the mid-layer
6716 * as appropriate.
6717 *
6718 * Return value:
6719 * IPR_RC_JOB_CONTINUE
6720 **/
6721static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6722{
6723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6724 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006725 struct ipr_config_table_entry_wrapper cfgtew;
6726 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727 LIST_HEAD(old_res);
6728
6729 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006730 if (ioa_cfg->sis64)
6731 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6732 else
6733 flag = ioa_cfg->u.cfg_table->hdr.flags;
6734
6735 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006736 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6737
6738 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6739 list_move_tail(&res->queue, &old_res);
6740
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006741 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07006742 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006743 else
6744 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6745
6746 for (i = 0; i < entries; i++) {
6747 if (ioa_cfg->sis64)
6748 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6749 else
6750 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006751 found = 0;
6752
6753 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006754 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006755 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6756 found = 1;
6757 break;
6758 }
6759 }
6760
6761 if (!found) {
6762 if (list_empty(&ioa_cfg->free_res_q)) {
6763 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6764 break;
6765 }
6766
6767 found = 1;
6768 res = list_entry(ioa_cfg->free_res_q.next,
6769 struct ipr_resource_entry, queue);
6770 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006771 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006772 res->add_to_ml = 1;
6773 }
6774
6775 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006776 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777 }
6778
6779 list_for_each_entry_safe(res, temp, &old_res, queue) {
6780 if (res->sdev) {
6781 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006782 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 }
6785 }
6786
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006787 list_for_each_entry_safe(res, temp, &old_res, queue) {
6788 ipr_clear_res_target(res);
6789 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6790 }
6791
Brian Kingac09c342007-04-26 16:00:16 -05006792 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6793 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6794 else
6795 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006796
6797 LEAVE;
6798 return IPR_RC_JOB_CONTINUE;
6799}
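/*
 * Summary of the table walk above: existing resources are first parked on
 * old_res, each config table entry is then matched against that list
 * (matches move back to used_res_q, unmatched entries get a fresh resource
 * and are flagged add_to_ml), and whatever remains on old_res is either
 * flagged del_from_ml (if it still has an sdev) or returned to the free
 * list.
 */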
6800
6801/**
6802 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6803 * @ipr_cmd: ipr command struct
6804 *
6805 * This function sends a Query IOA Configuration command
6806 * to the adapter to retrieve the IOA configuration table.
6807 *
6808 * Return value:
6809 * IPR_RC_JOB_RETURN
6810 **/
6811static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6812{
6813 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6814 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006815 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05006816 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006817
6818 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05006819 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6820 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006821 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6822 ucode_vpd->major_release, ucode_vpd->card_type,
6823 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6824 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6825 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6826
6827 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07006828 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006829 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6830 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006831
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006832 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08006833 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834
6835 ipr_cmd->job_step = ipr_init_res_table;
6836
6837 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6838
6839 LEAVE;
6840 return IPR_RC_JOB_RETURN;
6841}
6842
6843/**
6844 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 6845 * @ipr_cmd: ipr command struct
 * @flags: inquiry CDB byte 1 (EVPD flag)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the inquiry response buffer
6846 *
6847 * This utility function sends an inquiry to the adapter.
6848 *
6849 * Return value:
6850 * none
6851 **/
6852static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08006853 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006854{
6855 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856
6857 ENTER;
6858 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6859 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6860
6861 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6862 ioarcb->cmd_pkt.cdb[1] = flags;
6863 ioarcb->cmd_pkt.cdb[2] = page;
6864 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6865
Wayne Boyera32c0552010-02-19 13:23:36 -08006866 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006867
6868 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6869 LEAVE;
6870}
6871
6872/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06006873 * ipr_inquiry_page_supported - Is the given inquiry page supported
6874 * @page0: inquiry page 0 buffer
6875 * @page: page code.
6876 *
6877 * This function determines if the specified inquiry page is supported.
6878 *
6879 * Return value:
6880 * 1 if page is supported / 0 if not
6881 **/
6882static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6883{
6884 int i;
6885
6886 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6887 if (page0->page[i] == page)
6888 return 1;
6889
6890 return 0;
6891}
6892
6893/**
Brian Kingac09c342007-04-26 16:00:16 -05006894 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6895 * @ipr_cmd: ipr command struct
6896 *
6897 * This function sends a Page 0xD0 inquiry to the adapter
6898 * to retrieve adapter capabilities.
6899 *
6900 * Return value:
6901 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6902 **/
6903static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6904{
6905 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6906 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6907 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6908
6909 ENTER;
6910 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6911 memset(cap, 0, sizeof(*cap));
6912
6913 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6914 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6915 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6916 sizeof(struct ipr_inquiry_cap));
6917 return IPR_RC_JOB_RETURN;
6918 }
6919
6920 LEAVE;
6921 return IPR_RC_JOB_CONTINUE;
6922}
6923
6924/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006925 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6926 * @ipr_cmd: ipr command struct
6927 *
6928 * This function sends a Page 3 inquiry to the adapter
6929 * to retrieve software VPD information.
6930 *
6931 * Return value:
6932 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6933 **/
6934static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6935{
6936 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006937
6938 ENTER;
6939
Brian Kingac09c342007-04-26 16:00:16 -05006940 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006941
6942 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6943 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6944 sizeof(struct ipr_inquiry_page3));
6945
6946 LEAVE;
6947 return IPR_RC_JOB_RETURN;
6948}
6949
6950/**
6951 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6952 * @ipr_cmd: ipr command struct
6953 *
6954 * This function sends a Page 0 inquiry to the adapter
6955 * to retrieve supported inquiry pages.
6956 *
6957 * Return value:
6958 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6959 **/
6960static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6961{
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006963 char type[5];
6964
6965 ENTER;
6966
6967 /* Grab the type out of the VPD and store it away */
6968 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6969 type[4] = '\0';
6970 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6971
brking@us.ibm.com62275042005-11-01 17:01:14 -06006972 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973
brking@us.ibm.com62275042005-11-01 17:01:14 -06006974 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6975 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6976 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977
6978 LEAVE;
6979 return IPR_RC_JOB_RETURN;
6980}
6981
6982/**
6983 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6984 * @ipr_cmd: ipr command struct
6985 *
6986 * This function sends a standard inquiry to the adapter.
6987 *
6988 * Return value:
6989 * IPR_RC_JOB_RETURN
6990 **/
6991static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6992{
6993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6994
6995 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06006996 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006997
6998 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6999 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7000 sizeof(struct ipr_ioa_vpd));
7001
7002 LEAVE;
7003 return IPR_RC_JOB_RETURN;
7004}
7005
7006/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007007 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007008 * @ipr_cmd: ipr command struct
7009 *
 7010 * This function sends an Identify Host Request Response Queue
7011 * command to establish the HRRQ with the adapter.
7012 *
7013 * Return value:
7014 * IPR_RC_JOB_RETURN
7015 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08007016static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007017{
7018 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7019 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7020
7021 ENTER;
7022 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7023
7024 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7025 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7026
7027 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
Wayne Boyer214777b2010-02-19 13:24:26 -08007028 if (ioa_cfg->sis64)
7029 ioarcb->cmd_pkt.cdb[1] = 0x1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007030 ioarcb->cmd_pkt.cdb[2] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007031 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032 ioarcb->cmd_pkt.cdb[3] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007033 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007034 ioarcb->cmd_pkt.cdb[4] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007035 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007036 ioarcb->cmd_pkt.cdb[5] =
Wayne Boyer214777b2010-02-19 13:24:26 -08007037 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038 ioarcb->cmd_pkt.cdb[7] =
7039 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7040 ioarcb->cmd_pkt.cdb[8] =
7041 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7042
Wayne Boyer214777b2010-02-19 13:24:26 -08007043 if (ioa_cfg->sis64) {
7044 ioarcb->cmd_pkt.cdb[10] =
7045 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7046 ioarcb->cmd_pkt.cdb[11] =
7047 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7048 ioarcb->cmd_pkt.cdb[12] =
7049 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7050 ioarcb->cmd_pkt.cdb[13] =
7051 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7052 }
7053
Linus Torvalds1da177e2005-04-16 15:20:36 -07007054 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7055
7056 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7057
7058 LEAVE;
7059 return IPR_RC_JOB_RETURN;
7060}
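/*
 * Sketch of the Identify Host RRQ CDB layout implied by the code above
 * (illustrative only; the helper below is hypothetical and not part of the
 * driver). Bytes 2-5 carry bits 31-0 of the host RRQ DMA address, most
 * significant byte first, bytes 7-8 carry the queue length in bytes, and
 * on SIS-64 adapters bytes 10-13 carry bits 63-32 of the address.
 */
#if 0
static void example_fill_id_hrrq_cdb(u8 *cdb, u64 rrq_dma, u32 rrq_len,
				      int sis64)
{
	cdb[0] = IPR_ID_HOST_RR_Q;
	if (sis64)
		cdb[1] = 0x1;
	/* low 32 bits of the RRQ address, MSB first */
	cdb[2] = (rrq_dma >> 24) & 0xff;
	cdb[3] = (rrq_dma >> 16) & 0xff;
	cdb[4] = (rrq_dma >> 8) & 0xff;
	cdb[5] = rrq_dma & 0xff;
	/* queue length in bytes */
	cdb[7] = (rrq_len >> 8) & 0xff;
	cdb[8] = rrq_len & 0xff;
	/* high 32 bits of the RRQ address on SIS-64 */
	if (sis64) {
		cdb[10] = (rrq_dma >> 56) & 0xff;
		cdb[11] = (rrq_dma >> 48) & 0xff;
		cdb[12] = (rrq_dma >> 40) & 0xff;
		cdb[13] = (rrq_dma >> 32) & 0xff;
	}
}
#endif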
7061
7062/**
7063 * ipr_reset_timer_done - Adapter reset timer function
7064 * @ipr_cmd: ipr command struct
7065 *
7066 * Description: This function is used in adapter reset processing
7067 * for timing events. If the reset_cmd pointer in the IOA
7068 * config struct is not this adapter's we are doing nested
7069 * resets and fail_all_ops will take care of freeing the
7070 * command block.
7071 *
7072 * Return value:
7073 * none
7074 **/
7075static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7076{
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7078 unsigned long lock_flags = 0;
7079
7080 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7081
7082 if (ioa_cfg->reset_cmd == ipr_cmd) {
7083 list_del(&ipr_cmd->queue);
7084 ipr_cmd->done(ipr_cmd);
7085 }
7086
7087 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7088}
7089
7090/**
7091 * ipr_reset_start_timer - Start a timer for adapter reset job
7092 * @ipr_cmd: ipr command struct
7093 * @timeout: timeout value
7094 *
7095 * Description: This function is used in adapter reset processing
7096 * for timing events. If the reset_cmd pointer in the IOA
7097 * config struct is not this adapter's we are doing nested
7098 * resets and fail_all_ops will take care of freeing the
7099 * command block.
7100 *
7101 * Return value:
7102 * none
7103 **/
7104static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7105 unsigned long timeout)
7106{
7107 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7108 ipr_cmd->done = ipr_reset_ioa_job;
7109
7110 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7111 ipr_cmd->timer.expires = jiffies + timeout;
7112 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7113 add_timer(&ipr_cmd->timer);
7114}
7115
7116/**
7117 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7118 * @ioa_cfg: ioa cfg struct
7119 *
7120 * Return value:
7121 * nothing
7122 **/
7123static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7124{
7125 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7126
7127 /* Initialize Host RRQ pointers */
7128 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7129 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7130 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7131 ioa_cfg->toggle_bit = 1;
7132
7133 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007134 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135}
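/*
 * Each host RRQ entry is a 32-bit response descriptor written by the
 * adapter. Resetting hrrq_curr to hrrq_start and toggle_bit to 1 here lets
 * the response-processing path tell freshly written entries from stale
 * ones left over from before the reset; elsewhere in the driver toggle_bit
 * is flipped each time the queue wraps.
 */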
7136
7137/**
Wayne Boyer214777b2010-02-19 13:24:26 -08007138 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7139 * @ipr_cmd: ipr command struct
7140 *
7141 * Return value:
7142 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7143 **/
7144static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7145{
7146 unsigned long stage, stage_time;
7147 u32 feedback;
7148 volatile u32 int_reg;
7149 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7150 u64 maskval = 0;
7151
7152 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7153 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7154 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7155
7156 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7157
7158 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07007159 if (stage_time == 0)
7160 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7161 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08007162 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7163 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7164 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7165
7166 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7167 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7168 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7169 stage_time = ioa_cfg->transop_timeout;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7172 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7173 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7174 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7175 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7176 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7177 return IPR_RC_JOB_CONTINUE;
7178 }
7179
7180 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7181 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7182 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7183 ipr_cmd->done = ipr_reset_ioa_job;
7184 add_timer(&ipr_cmd->timer);
7185 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7186
7187 return IPR_RC_JOB_RETURN;
7188}
7189
7190/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007191 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7192 * @ipr_cmd: ipr command struct
7193 *
7194 * This function reinitializes some control blocks and
7195 * enables destructive diagnostics on the adapter.
7196 *
7197 * Return value:
7198 * IPR_RC_JOB_RETURN
7199 **/
7200static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7201{
7202 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7203 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07007204 volatile u64 maskval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007205
7206 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08007207 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208 ipr_init_ioa_mem(ioa_cfg);
7209
7210 ioa_cfg->allow_interrupts = 1;
Wayne Boyer7be96902010-05-10 09:14:07 -07007211 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212
7213 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7214 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08007215 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007216 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7217 return IPR_RC_JOB_CONTINUE;
7218 }
7219
7220 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08007221 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222
Wayne Boyer7be96902010-05-10 09:14:07 -07007223 if (ioa_cfg->sis64) {
7224 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7225 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7226 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7227 } else
7228 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08007229
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7231
7232 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7233
Wayne Boyer214777b2010-02-19 13:24:26 -08007234 if (ioa_cfg->sis64) {
7235 ipr_cmd->job_step = ipr_reset_next_stage;
7236 return IPR_RC_JOB_CONTINUE;
7237 }
7238
Linus Torvalds1da177e2005-04-16 15:20:36 -07007239 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
Brian King5469cb52007-03-29 12:42:40 -05007240 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7242 ipr_cmd->done = ipr_reset_ioa_job;
7243 add_timer(&ipr_cmd->timer);
7244 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7245
7246 LEAVE;
7247 return IPR_RC_JOB_RETURN;
7248}
7249
7250/**
7251 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7252 * @ipr_cmd: ipr command struct
7253 *
7254 * This function is invoked when an adapter dump has run out
7255 * of processing time.
7256 *
7257 * Return value:
7258 * IPR_RC_JOB_CONTINUE
7259 **/
7260static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7261{
7262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7263
7264 if (ioa_cfg->sdt_state == GET_DUMP)
7265 ioa_cfg->sdt_state = ABORT_DUMP;
7266
7267 ipr_cmd->job_step = ipr_reset_alert;
7268
7269 return IPR_RC_JOB_CONTINUE;
7270}
7271
7272/**
7273 * ipr_unit_check_no_data - Log a unit check/no data error log
7274 * @ioa_cfg: ioa config struct
7275 *
7276 * Logs an error indicating the adapter unit checked, but for some
7277 * reason, we were unable to fetch the unit check buffer.
7278 *
7279 * Return value:
7280 * nothing
7281 **/
7282static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7283{
7284 ioa_cfg->errors_logged++;
7285 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7286}
7287
7288/**
7289 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7290 * @ioa_cfg: ioa config struct
7291 *
7292 * Fetches the unit check buffer from the adapter by clocking the data
7293 * through the mailbox register.
7294 *
7295 * Return value:
7296 * nothing
7297 **/
7298static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7299{
7300 unsigned long mailbox;
7301 struct ipr_hostrcb *hostrcb;
7302 struct ipr_uc_sdt sdt;
7303 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05007304 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007305
7306 mailbox = readl(ioa_cfg->ioa_mailbox);
7307
Wayne Boyerdcbad002010-02-19 13:24:14 -08007308 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309 ipr_unit_check_no_data(ioa_cfg);
7310 return;
7311 }
7312
7313 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7314 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7315 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7316
Wayne Boyerdcbad002010-02-19 13:24:14 -08007317 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7318 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7319 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007320 ipr_unit_check_no_data(ioa_cfg);
7321 return;
7322 }
7323
7324 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08007325 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7326 length = be32_to_cpu(sdt.entry[0].end_token);
7327 else
7328 length = (be32_to_cpu(sdt.entry[0].end_token) -
7329 be32_to_cpu(sdt.entry[0].start_token)) &
7330 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007331
7332 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7333 struct ipr_hostrcb, queue);
7334 list_del(&hostrcb->queue);
7335 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7336
7337 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08007338 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07007339 (__be32 *)&hostrcb->hcam,
7340 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7341
Brian King65f56472007-04-26 16:00:12 -05007342 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007343 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08007344 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05007345 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7346 ioa_cfg->sdt_state == GET_DUMP)
7347 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7348 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349 ipr_unit_check_no_data(ioa_cfg);
7350
7351 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7352}
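/*
 * In outline, the unit check fetch above works like this: the mailbox
 * register points at a smart dump table (SDT); the SDT header and first
 * entry are pulled in with ipr_get_ldump_data_section() and validated,
 * and the data described by that first entry (the unit check buffer) is
 * then clocked into a free hostrcb and passed to ipr_handle_log_data()
 * for error logging.  Any failure along the way falls back to
 * ipr_unit_check_no_data().
 */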
7353
7354/**
7355 * ipr_reset_restore_cfg_space - Restore PCI config space.
7356 * @ipr_cmd: ipr command struct
7357 *
7358 * Description: This function restores the saved PCI config space of
7359 * the adapter, fails all outstanding ops back to the callers, and
7360 * fetches the dump/unit check if applicable to this reset.
7361 *
7362 * Return value:
7363 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7364 **/
7365static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7366{
7367 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7368 int rc;
7369
7370 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02007371 ioa_cfg->pdev->state_saved = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372 rc = pci_restore_state(ioa_cfg->pdev);
7373
7374 if (rc != PCIBIOS_SUCCESSFUL) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07007375 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376 return IPR_RC_JOB_CONTINUE;
7377 }
7378
7379 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07007380 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007381 return IPR_RC_JOB_CONTINUE;
7382 }
7383
7384 ipr_fail_all_ops(ioa_cfg);
7385
7386 if (ioa_cfg->ioa_unit_checked) {
7387 ioa_cfg->ioa_unit_checked = 0;
7388 ipr_get_unit_check_buffer(ioa_cfg);
7389 ipr_cmd->job_step = ipr_reset_alert;
7390 ipr_reset_start_timer(ipr_cmd, 0);
7391 return IPR_RC_JOB_RETURN;
7392 }
7393
7394 if (ioa_cfg->in_ioa_bringdown) {
7395 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7396 } else {
7397 ipr_cmd->job_step = ipr_reset_enable_ioa;
7398
7399 if (GET_DUMP == ioa_cfg->sdt_state) {
7400 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7401 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7402 schedule_work(&ioa_cfg->work_q);
7403 return IPR_RC_JOB_RETURN;
7404 }
7405 }
7406
Wayne Boyer438b0332010-05-10 09:13:00 -07007407 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007408 return IPR_RC_JOB_CONTINUE;
7409}
7410
7411/**
Brian Kinge619e1a2007-01-23 11:25:37 -06007412 * ipr_reset_bist_done - BIST has completed on the adapter.
7413 * @ipr_cmd: ipr command struct
7414 *
7415 * Description: Unblock config space and resume the reset process.
7416 *
7417 * Return value:
7418 * IPR_RC_JOB_CONTINUE
7419 **/
7420static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7421{
7422 ENTER;
7423 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7424 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7425 LEAVE;
7426 return IPR_RC_JOB_CONTINUE;
7427}
7428
7429/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007430 * ipr_reset_start_bist - Run BIST on the adapter.
7431 * @ipr_cmd: ipr command struct
7432 *
7433 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7434 *
7435 * Return value:
7436 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7437 **/
7438static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7439{
7440 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7441 int rc;
7442
7443 ENTER;
Brian Kingb30197d2005-09-27 01:21:56 -07007444 pci_block_user_cfg_access(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7446
7447 if (rc != PCIBIOS_SUCCESSFUL) {
Brian Kinga9aedb02007-03-29 12:43:23 -05007448 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
Wayne Boyer96d21f02010-05-10 09:13:27 -07007449 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007450 rc = IPR_RC_JOB_CONTINUE;
7451 } else {
Brian Kinge619e1a2007-01-23 11:25:37 -06007452 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007453 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7454 rc = IPR_RC_JOB_RETURN;
7455 }
7456
7457 LEAVE;
7458 return rc;
7459}
7460
7461/**
Brian King463fc692007-05-07 17:09:05 -05007462 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7463 * @ipr_cmd: ipr command struct
7464 *
7465 * Description: This clears PCI reset to the adapter and delays two seconds.
7466 *
7467 * Return value:
7468 * IPR_RC_JOB_RETURN
7469 **/
7470static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7471{
7472 ENTER;
7473 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7474 ipr_cmd->job_step = ipr_reset_bist_done;
7475 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7476 LEAVE;
7477 return IPR_RC_JOB_RETURN;
7478}
7479
7480/**
7481 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7482 * @ipr_cmd: ipr command struct
7483 *
7484 * Description: This asserts PCI reset to the adapter.
7485 *
7486 * Return value:
7487 * IPR_RC_JOB_RETURN
7488 **/
7489static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7490{
7491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492 struct pci_dev *pdev = ioa_cfg->pdev;
7493
7494 ENTER;
7495 pci_block_user_cfg_access(pdev);
7496 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7497 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7498 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7499 LEAVE;
7500 return IPR_RC_JOB_RETURN;
7501}
7502
7503/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007504 * ipr_reset_allowed - Query whether or not IOA can be reset
7505 * @ioa_cfg: ioa config struct
7506 *
7507 * Return value:
7508 * 0 if reset not allowed / non-zero if reset is allowed
7509 **/
7510static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7511{
7512 volatile u32 temp_reg;
7513
7514 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7515 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7516}
7517
7518/**
7519 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7520 * @ipr_cmd: ipr command struct
7521 *
7522 * Description: This function waits for adapter permission to run BIST,
7523 * then runs BIST. If the adapter does not give permission after a
 7524 * reasonable time, we will reset the adapter anyway. Resetting the
 7525 * adapter without warning it risks losing its persistent error log:
 7526 * if the reset occurs while the adapter is writing to its flash, that
 7527 * flash segment will end up with bad ECC and be zeroed.
7529 *
7530 * Return value:
7531 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7532 **/
7533static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7534{
7535 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536 int rc = IPR_RC_JOB_RETURN;
7537
7538 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7539 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7540 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7541 } else {
Brian King463fc692007-05-07 17:09:05 -05007542 ipr_cmd->job_step = ioa_cfg->reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543 rc = IPR_RC_JOB_CONTINUE;
7544 }
7545
7546 return rc;
7547}
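/*
 * Timing sketch for the wait loop above: ipr_reset_alert() seeds
 * ipr_cmd->u.time_left with IPR_WAIT_FOR_RESET_TIMEOUT, and this step
 * re-arms its timer every IPR_CHECK_FOR_RESET_TIMEOUT until either the
 * adapter drops IPR_PCII_CRITICAL_OPERATION (reset is allowed) or
 * time_left is exhausted, at which point ioa_cfg->reset runs anyway.
 */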
7548
7549/**
 7550 * ipr_reset_alert - Alert the adapter of a pending reset
7551 * @ipr_cmd: ipr command struct
7552 *
7553 * Description: This function alerts the adapter that it will be reset.
7554 * If memory space is not currently enabled, proceed directly
7555 * to running BIST on the adapter. The timer must always be started
7556 * so we guarantee we do not run BIST from ipr_isr.
7557 *
7558 * Return value:
7559 * IPR_RC_JOB_RETURN
7560 **/
7561static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7562{
7563 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7564 u16 cmd_reg;
7565 int rc;
7566
7567 ENTER;
7568 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7569
7570 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7571 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08007572 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7574 } else {
Brian King463fc692007-05-07 17:09:05 -05007575 ipr_cmd->job_step = ioa_cfg->reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007576 }
7577
7578 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7579 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7580
7581 LEAVE;
7582 return IPR_RC_JOB_RETURN;
7583}
7584
7585/**
7586 * ipr_reset_ucode_download_done - Microcode download completion
7587 * @ipr_cmd: ipr command struct
7588 *
7589 * Description: This function unmaps the microcode download buffer.
7590 *
7591 * Return value:
7592 * IPR_RC_JOB_CONTINUE
7593 **/
7594static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7595{
7596 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7597 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7598
7599 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7600 sglist->num_sg, DMA_TO_DEVICE);
7601
7602 ipr_cmd->job_step = ipr_reset_alert;
7603 return IPR_RC_JOB_CONTINUE;
7604}
7605
7606/**
7607 * ipr_reset_ucode_download - Download microcode to the adapter
7608 * @ipr_cmd: ipr command struct
7609 *
 7610 * Description: This function checks to see if there is microcode
7611 * to download to the adapter. If there is, a download is performed.
7612 *
7613 * Return value:
7614 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7615 **/
7616static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7617{
7618 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7619 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7620
7621 ENTER;
7622 ipr_cmd->job_step = ipr_reset_alert;
7623
7624 if (!sglist)
7625 return IPR_RC_JOB_CONTINUE;
7626
7627 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7628 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7629 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7630 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7631 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7632 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7633 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7634
Wayne Boyera32c0552010-02-19 13:23:36 -08007635 if (ioa_cfg->sis64)
7636 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7637 else
7638 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007639 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7640
7641 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7642 IPR_WRITE_BUFFER_TIMEOUT);
7643
7644 LEAVE;
7645 return IPR_RC_JOB_RETURN;
7646}
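/*
 * The WRITE BUFFER CDB built above carries the microcode image length
 * as a 24-bit value split across cdb[6..8], most significant byte
 * first.  As an illustrative example (not a real image size),
 * buffer_len = 0x012345 yields cdb[6] = 0x01, cdb[7] = 0x23 and
 * cdb[8] = 0x45.
 */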
7647
7648/**
7649 * ipr_reset_shutdown_ioa - Shutdown the adapter
7650 * @ipr_cmd: ipr command struct
7651 *
7652 * Description: This function issues an adapter shutdown of the
7653 * specified type to the specified adapter as part of the
7654 * adapter reset job.
7655 *
7656 * Return value:
7657 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7658 **/
7659static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7660{
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7662 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7663 unsigned long timeout;
7664 int rc = IPR_RC_JOB_CONTINUE;
7665
7666 ENTER;
7667 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7668 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7669 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7670 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7671 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7672
Brian Kingac09c342007-04-26 16:00:16 -05007673 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7674 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007675 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7676 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05007677 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7678 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007679 else
Brian Kingac09c342007-04-26 16:00:16 -05007680 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681
7682 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7683
7684 rc = IPR_RC_JOB_RETURN;
7685 ipr_cmd->job_step = ipr_reset_ucode_download;
7686 } else
7687 ipr_cmd->job_step = ipr_reset_alert;
7688
7689 LEAVE;
7690 return rc;
7691}
7692
7693/**
7694 * ipr_reset_ioa_job - Adapter reset job
7695 * @ipr_cmd: ipr command struct
7696 *
7697 * Description: This function is the job router for the adapter reset job.
7698 *
7699 * Return value:
7700 * none
7701 **/
7702static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7703{
7704 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007705 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7706
7707 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07007708 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709
7710 if (ioa_cfg->reset_cmd != ipr_cmd) {
7711 /*
7712 * We are doing nested adapter resets and this is
7713 * not the current reset job.
7714 */
7715 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7716 return;
7717 }
7718
7719 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007720 rc = ipr_cmd->job_step_failed(ipr_cmd);
7721 if (rc == IPR_RC_JOB_RETURN)
7722 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007723 }
7724
7725 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007726 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007727 rc = ipr_cmd->job_step(ipr_cmd);
7728 } while(rc == IPR_RC_JOB_CONTINUE);
7729}
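/*
 * The reset job above is a small state machine: each step either
 * returns IPR_RC_JOB_CONTINUE (run the next ipr_cmd->job_step right
 * away in the loop) or IPR_RC_JOB_RETURN (resume later from a timer or
 * command completion).  One common path through the steps defined in
 * this file looks roughly like:
 *
 *   ipr_reset_shutdown_ioa -> ipr_reset_ucode_download ->
 *   ipr_reset_alert -> ipr_reset_wait_to_start_bist ->
 *   ioa_cfg->reset (ipr_reset_start_bist or ipr_reset_slot_reset) ->
 *   ipr_reset_bist_done -> ipr_reset_restore_cfg_space ->
 *   ipr_reset_enable_ioa -> ...
 *
 * The exact sequence depends on the shutdown type, dump state and
 * adapter chip.
 */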
7730
7731/**
7732 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7733 * @ioa_cfg: ioa config struct
7734 * @job_step: first job step of reset job
7735 * @shutdown_type: shutdown type
7736 *
7737 * Description: This function will initiate the reset of the given adapter
7738 * starting at the selected job step.
7739 * If the caller needs to wait on the completion of the reset,
7740 * the caller must sleep on the reset_wait_q.
7741 *
7742 * Return value:
7743 * none
7744 **/
7745static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7746 int (*job_step) (struct ipr_cmnd *),
7747 enum ipr_shutdown_type shutdown_type)
7748{
7749 struct ipr_cmnd *ipr_cmd;
7750
7751 ioa_cfg->in_reset_reload = 1;
7752 ioa_cfg->allow_cmds = 0;
7753 scsi_block_requests(ioa_cfg->host);
7754
7755 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7756 ioa_cfg->reset_cmd = ipr_cmd;
7757 ipr_cmd->job_step = job_step;
7758 ipr_cmd->u.shutdown_type = shutdown_type;
7759
7760 ipr_reset_ioa_job(ipr_cmd);
7761}
7762
7763/**
7764 * ipr_initiate_ioa_reset - Initiate an adapter reset
7765 * @ioa_cfg: ioa config struct
7766 * @shutdown_type: shutdown type
7767 *
7768 * Description: This function will initiate the reset of the given adapter.
7769 * If the caller needs to wait on the completion of the reset,
7770 * the caller must sleep on the reset_wait_q.
7771 *
7772 * Return value:
7773 * none
7774 **/
7775static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7776 enum ipr_shutdown_type shutdown_type)
7777{
7778 if (ioa_cfg->ioa_is_dead)
7779 return;
7780
7781 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7782 ioa_cfg->sdt_state = ABORT_DUMP;
7783
7784 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7785 dev_err(&ioa_cfg->pdev->dev,
7786 "IOA taken offline - error recovery failed\n");
7787
7788 ioa_cfg->reset_retries = 0;
7789 ioa_cfg->ioa_is_dead = 1;
7790
7791 if (ioa_cfg->in_ioa_bringdown) {
7792 ioa_cfg->reset_cmd = NULL;
7793 ioa_cfg->in_reset_reload = 0;
7794 ipr_fail_all_ops(ioa_cfg);
7795 wake_up_all(&ioa_cfg->reset_wait_q);
7796
7797 spin_unlock_irq(ioa_cfg->host->host_lock);
7798 scsi_unblock_requests(ioa_cfg->host);
7799 spin_lock_irq(ioa_cfg->host->host_lock);
7800 return;
7801 } else {
7802 ioa_cfg->in_ioa_bringdown = 1;
7803 shutdown_type = IPR_SHUTDOWN_NONE;
7804 }
7805 }
7806
7807 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7808 shutdown_type);
7809}
7810
7811/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007812 * ipr_reset_freeze - Hold off all I/O activity
7813 * @ipr_cmd: ipr command struct
7814 *
7815 * Description: If the PCI slot is frozen, hold off all I/O
7816 * activity; then, as soon as the slot is available again,
7817 * initiate an adapter reset.
7818 */
7819static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7820{
7821 /* Disallow new interrupts, avoid loop */
7822 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7823 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7824 ipr_cmd->done = ipr_reset_ioa_job;
7825 return IPR_RC_JOB_RETURN;
7826}
7827
7828/**
7829 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7830 * @pdev: PCI device struct
7831 *
7832 * Description: This routine is called to tell us that the PCI bus
7833 * is down. Can't do anything here, except put the device driver
7834 * into a holding pattern, waiting for the PCI bus to come back.
7835 */
7836static void ipr_pci_frozen(struct pci_dev *pdev)
7837{
7838 unsigned long flags = 0;
7839 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7840
7841 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7842 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7844}
7845
7846/**
7847 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7848 * @pdev: PCI device struct
7849 *
7850 * Description: This routine is called by the pci error recovery
7851 * code after the PCI slot has been reset, just before we
7852 * should resume normal operations.
7853 */
7854static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7855{
7856 unsigned long flags = 0;
7857 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7858
7859 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King463fc692007-05-07 17:09:05 -05007860 if (ioa_cfg->needs_warm_reset)
7861 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7862 else
7863 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7864 IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007865 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7866 return PCI_ERS_RESULT_RECOVERED;
7867}
7868
7869/**
7870 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7871 * @pdev: PCI device struct
7872 *
7873 * Description: This routine is called when the PCI bus has
7874 * permanently failed.
7875 */
7876static void ipr_pci_perm_failure(struct pci_dev *pdev)
7877{
7878 unsigned long flags = 0;
7879 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7880
7881 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7882 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7883 ioa_cfg->sdt_state = ABORT_DUMP;
7884 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7885 ioa_cfg->in_ioa_bringdown = 1;
Kleber S. Souza6ff63892009-05-04 10:41:02 -03007886 ioa_cfg->allow_cmds = 0;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06007887 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7889}
7890
7891/**
7892 * ipr_pci_error_detected - Called when a PCI error is detected.
7893 * @pdev: PCI device struct
7894 * @state: PCI channel state
7895 *
7896 * Description: Called when a PCI error is detected.
7897 *
7898 * Return value:
7899 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7900 */
7901static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7902 pci_channel_state_t state)
7903{
7904 switch (state) {
7905 case pci_channel_io_frozen:
7906 ipr_pci_frozen(pdev);
7907 return PCI_ERS_RESULT_NEED_RESET;
7908 case pci_channel_io_perm_failure:
7909 ipr_pci_perm_failure(pdev);
7910 return PCI_ERS_RESULT_DISCONNECT;
7912 default:
7913 break;
7914 }
7915 return PCI_ERS_RESULT_NEED_RESET;
7916}
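/*
 * Taken together, the handlers above implement this driver's PCI error
 * recovery (EEH) flow, roughly: a frozen slot freezes I/O and queues a
 * reset job (ipr_pci_frozen), a completed slot reset resumes recovery
 * either with a full warm reset or by restoring config space
 * (ipr_pci_slot_reset), and a permanent failure takes the adapter
 * offline (ipr_pci_perm_failure).
 */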
7917
7918/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007919 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7920 * @ioa_cfg: ioa cfg struct
7921 *
7922 * Description: This is the second phase of adapter intialization
7923 * This function takes care of initilizing the adapter to the point
7924 * where it can accept new commands.
7925
7926 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02007927 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07007928 **/
7929static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7930{
7931 int rc = 0;
7932 unsigned long host_lock_flags = 0;
7933
7934 ENTER;
7935 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7936 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06007937 if (ioa_cfg->needs_hard_reset) {
7938 ioa_cfg->needs_hard_reset = 0;
7939 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7940 } else
7941 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7942 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943
7944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7945 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7946 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7947
7948 if (ioa_cfg->ioa_is_dead) {
7949 rc = -EIO;
7950 } else if (ipr_invalid_adapter(ioa_cfg)) {
7951 if (!ipr_testmode)
7952 rc = -EIO;
7953
7954 dev_err(&ioa_cfg->pdev->dev,
7955 "Adapter not supported in this hardware configuration.\n");
7956 }
7957
7958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7959
7960 LEAVE;
7961 return rc;
7962}
7963
7964/**
7965 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7966 * @ioa_cfg: ioa config struct
7967 *
7968 * Return value:
7969 * none
7970 **/
7971static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7972{
7973 int i;
7974
7975 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7976 if (ioa_cfg->ipr_cmnd_list[i])
7977 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7978 ioa_cfg->ipr_cmnd_list[i],
7979 ioa_cfg->ipr_cmnd_list_dma[i]);
7980
7981 ioa_cfg->ipr_cmnd_list[i] = NULL;
7982 }
7983
7984 if (ioa_cfg->ipr_cmd_pool)
7985 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7986
7987 ioa_cfg->ipr_cmd_pool = NULL;
7988}
7989
7990/**
7991 * ipr_free_mem - Frees memory allocated for an adapter
7992 * @ioa_cfg: ioa cfg struct
7993 *
7994 * Return value:
7995 * nothing
7996 **/
7997static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7998{
7999 int i;
8000
8001 kfree(ioa_cfg->res_entries);
8002 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8003 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8004 ipr_free_cmd_blks(ioa_cfg);
8005 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8006 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008007 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8008 ioa_cfg->u.cfg_table,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008009 ioa_cfg->cfg_table_dma);
8010
8011 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8012 pci_free_consistent(ioa_cfg->pdev,
8013 sizeof(struct ipr_hostrcb),
8014 ioa_cfg->hostrcb[i],
8015 ioa_cfg->hostrcb_dma[i]);
8016 }
8017
8018 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008019 kfree(ioa_cfg->trace);
8020}
8021
8022/**
8023 * ipr_free_all_resources - Free all allocated resources for an adapter.
 8024 * @ioa_cfg: ioa config struct
8025 *
8026 * This function frees all allocated resources for the
8027 * specified adapter.
8028 *
8029 * Return value:
8030 * none
8031 **/
8032static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8033{
8034 struct pci_dev *pdev = ioa_cfg->pdev;
8035
8036 ENTER;
8037 free_irq(pdev->irq, ioa_cfg);
Wayne Boyer5a9ef252009-01-23 09:17:35 -08008038 pci_disable_msi(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008039 iounmap(ioa_cfg->hdw_dma_regs);
8040 pci_release_regions(pdev);
8041 ipr_free_mem(ioa_cfg);
8042 scsi_host_put(ioa_cfg->host);
8043 pci_disable_device(pdev);
8044 LEAVE;
8045}
8046
8047/**
8048 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8049 * @ioa_cfg: ioa config struct
8050 *
8051 * Return value:
8052 * 0 on success / -ENOMEM on allocation failure
8053 **/
8054static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8055{
8056 struct ipr_cmnd *ipr_cmd;
8057 struct ipr_ioarcb *ioarcb;
8058 dma_addr_t dma_addr;
8059 int i;
8060
8061 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
Wayne Boyera32c0552010-02-19 13:23:36 -08008062 sizeof(struct ipr_cmnd), 16, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008063
8064 if (!ioa_cfg->ipr_cmd_pool)
8065 return -ENOMEM;
8066
8067 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Christoph Lametere94b1762006-12-06 20:33:17 -08008068 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008069
8070 if (!ipr_cmd) {
8071 ipr_free_cmd_blks(ioa_cfg);
8072 return -ENOMEM;
8073 }
8074
8075 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8076 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8077 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8078
8079 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08008080 ipr_cmd->dma_addr = dma_addr;
8081 if (ioa_cfg->sis64)
8082 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8083 else
8084 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8085
Linus Torvalds1da177e2005-04-16 15:20:36 -07008086 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08008087 if (ioa_cfg->sis64) {
8088 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8089 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8090 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008091 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08008092 } else {
8093 ioarcb->write_ioadl_addr =
8094 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8095 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8096 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07008097 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08008098 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8100 ipr_cmd->cmd_index = i;
8101 ipr_cmd->ioa_cfg = ioa_cfg;
8102 ipr_cmd->sense_buffer_dma = dma_addr +
8103 offsetof(struct ipr_cmnd, sense_buffer);
8104
8105 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8106 }
8107
8108 return 0;
8109}
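/*
 * Layout note: each command block is a single pci_pool allocation, so
 * every address the adapter needs (the IOADL chain and the IOASA status
 * area) can be derived from the block's DMA handle with offsetof(),
 * for example:
 *
 *   ioasa (SIS-32) = dma_addr + offsetof(struct ipr_cmnd, s.ioasa)
 *   ioasa (SIS-64) = dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)
 *
 * SIS-64 adapters are given full 64-bit addresses; SIS-32 adapters use
 * the 32-bit IOARCB fields.
 */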
8110
8111/**
8112 * ipr_alloc_mem - Allocate memory for an adapter
8113 * @ioa_cfg: ioa config struct
8114 *
8115 * Return value:
8116 * 0 on success / non-zero for error
8117 **/
8118static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8119{
8120 struct pci_dev *pdev = ioa_cfg->pdev;
8121 int i, rc = -ENOMEM;
8122
8123 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008124 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008125 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008126
8127 if (!ioa_cfg->res_entries)
8128 goto out;
8129
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008130 if (ioa_cfg->sis64) {
8131 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8132 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8133 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8134 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8135 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8136 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8137 }
8138
8139 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008140 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008141 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8142 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143
8144 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8145 sizeof(struct ipr_misc_cbs),
8146 &ioa_cfg->vpd_cbs_dma);
8147
8148 if (!ioa_cfg->vpd_cbs)
8149 goto out_free_res_entries;
8150
8151 if (ipr_alloc_cmd_blks(ioa_cfg))
8152 goto out_free_vpd_cbs;
8153
8154 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8155 sizeof(u32) * IPR_NUM_CMD_BLKS,
8156 &ioa_cfg->host_rrq_dma);
8157
8158 if (!ioa_cfg->host_rrq)
8159 goto out_ipr_free_cmd_blocks;
8160
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008161 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8162 ioa_cfg->cfg_table_size,
8163 &ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008164
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008165 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008166 goto out_free_host_rrq;
8167
8168 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8169 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8170 sizeof(struct ipr_hostrcb),
8171 &ioa_cfg->hostrcb_dma[i]);
8172
8173 if (!ioa_cfg->hostrcb[i])
8174 goto out_free_hostrcb_dma;
8175
8176 ioa_cfg->hostrcb[i]->hostrcb_dma =
8177 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06008178 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008179 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8180 }
8181
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06008182 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008183 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8184
8185 if (!ioa_cfg->trace)
8186 goto out_free_hostrcb_dma;
8187
Linus Torvalds1da177e2005-04-16 15:20:36 -07008188 rc = 0;
8189out:
8190 LEAVE;
8191 return rc;
8192
8193out_free_hostrcb_dma:
8194 while (i-- > 0) {
8195 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8196 ioa_cfg->hostrcb[i],
8197 ioa_cfg->hostrcb_dma[i]);
8198 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008199 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8200 ioa_cfg->u.cfg_table,
8201 ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202out_free_host_rrq:
8203 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8204 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8205out_ipr_free_cmd_blocks:
8206 ipr_free_cmd_blks(ioa_cfg);
8207out_free_vpd_cbs:
8208 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8209 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8210out_free_res_entries:
8211 kfree(ioa_cfg->res_entries);
8212 goto out;
8213}
8214
8215/**
8216 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8217 * @ioa_cfg: ioa config struct
8218 *
8219 * Return value:
8220 * none
8221 **/
8222static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8223{
8224 int i;
8225
8226 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8227 ioa_cfg->bus_attr[i].bus = i;
8228 ioa_cfg->bus_attr[i].qas_enabled = 0;
8229 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8230 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8231 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8232 else
8233 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8234 }
8235}
8236
8237/**
8238 * ipr_init_ioa_cfg - Initialize IOA config struct
8239 * @ioa_cfg: ioa config struct
8240 * @host: scsi host struct
8241 * @pdev: PCI dev struct
8242 *
8243 * Return value:
8244 * none
8245 **/
8246static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8247 struct Scsi_Host *host, struct pci_dev *pdev)
8248{
8249 const struct ipr_interrupt_offsets *p;
8250 struct ipr_interrupts *t;
8251 void __iomem *base;
8252
8253 ioa_cfg->host = host;
8254 ioa_cfg->pdev = pdev;
8255 ioa_cfg->log_level = ipr_log_level;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06008256 ioa_cfg->doorbell = IPR_DOORBELL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008257 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8258 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8259 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8260 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8261 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8262 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8263 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8264 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8265
8266 INIT_LIST_HEAD(&ioa_cfg->free_q);
8267 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8268 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8269 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8270 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8271 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
David Howellsc4028952006-11-22 14:57:56 +00008272 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008273 init_waitqueue_head(&ioa_cfg->reset_wait_q);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008274 init_waitqueue_head(&ioa_cfg->msi_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008275 ioa_cfg->sdt_state = INACTIVE;
8276
8277 ipr_initialize_bus_attr(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008278 ioa_cfg->max_devs_supported = ipr_max_devs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008279
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008280 if (ioa_cfg->sis64) {
8281 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8282 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8283 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8284 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8285 } else {
8286 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8287 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8288 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8289 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008291 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8292 host->unique_id = host->host_no;
8293 host->max_cmd_len = IPR_MAX_CDB_LEN;
8294 pci_set_drvdata(pdev, ioa_cfg);
8295
8296 p = &ioa_cfg->chip_cfg->regs;
8297 t = &ioa_cfg->regs;
8298 base = ioa_cfg->hdw_dma_regs;
8299
8300 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8301 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008302 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008303 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008304 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008306 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008307 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008308 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008309 t->ioarrin_reg = base + p->ioarrin_reg;
8310 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008311 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008312 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008313 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08008315 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08008316
8317 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08008318 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08008319 t->dump_addr_reg = base + p->dump_addr_reg;
8320 t->dump_data_reg = base + p->dump_data_reg;
8321 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322}
8323
8324/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008325 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07008326 * @dev_id: PCI device id struct
8327 *
8328 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008329 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07008330 **/
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008331static const struct ipr_chip_t * __devinit
8332ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008333{
8334 int i;
8335
Linus Torvalds1da177e2005-04-16 15:20:36 -07008336 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8337 if (ipr_chip[i].vendor == dev_id->vendor &&
8338 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008339 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008340 return NULL;
8341}
8342
8343/**
Wayne Boyer95fecd92009-06-16 15:13:28 -07008344 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 8345 * @irq: interrupt number
 * @devp: pointer to the ioa config struct passed to request_irq()
 8346 *
 8347 * Description: Simply sets the msi_received flag to 1, indicating that
 8348 * Message Signaled Interrupts are supported.
 8349 *
 8350 * Return value:
 8351 * 	IRQ_HANDLED
8352 **/
8353static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8354{
8355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8356 unsigned long lock_flags = 0;
8357 irqreturn_t rc = IRQ_HANDLED;
8358
8359 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8360
8361 ioa_cfg->msi_received = 1;
8362 wake_up(&ioa_cfg->msi_wait_q);
8363
8364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8365 return rc;
8366}
8367
8368/**
8369 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 8370 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
 8371 *
 8372 * Description: The return value from pci_enable_msi() cannot always be
 8373 * trusted. This routine sets up and initiates a test interrupt to determine
 8374 * whether the interrupt is received via the ipr_test_intr() service routine.
 8375 * If the test fails, the driver will fall back to LSI.
8376 *
8377 * Return value:
8378 * 0 on success / non-zero on failure
8379 **/
8380static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8381 struct pci_dev *pdev)
8382{
8383 int rc;
8384 volatile u32 int_reg;
8385 unsigned long lock_flags = 0;
8386
8387 ENTER;
8388
8389 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8390 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8391 ioa_cfg->msi_received = 0;
8392 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -08008393 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008394 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8396
8397 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8398 if (rc) {
8399 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8400 return rc;
8401 } else if (ipr_debug)
8402 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8403
Wayne Boyer214777b2010-02-19 13:24:26 -08008404 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008405 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8406 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8407 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8408
8409 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8410 if (!ioa_cfg->msi_received) {
8411 /* MSI test failed */
8412 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8413 rc = -EOPNOTSUPP;
8414 } else if (ipr_debug)
8415 dev_info(&pdev->dev, "MSI test succeeded.\n");
8416
8417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8418
8419 free_irq(pdev->irq, ioa_cfg);
8420
8421 LEAVE;
8422
8423 return rc;
8424}
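/*
 * In outline, the MSI test above masks everything except the I/O debug
 * acknowledge interrupt, hooks ipr_test_intr() to the vector handed out
 * by pci_enable_msi(), writes IPR_PCII_IO_DEBUG_ACKNOWLEDGE so the
 * adapter raises that interrupt, and waits up to one second (HZ) for
 * msi_received to be set.  If the interrupt never arrives, the caller
 * disables MSI and falls back to legacy line interrupts.
 */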
8425
8426/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008427 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8428 * @pdev: PCI device struct
8429 * @dev_id: PCI device id struct
8430 *
8431 * Return value:
8432 * 0 on success / non-zero on failure
8433 **/
8434static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8435 const struct pci_device_id *dev_id)
8436{
8437 struct ipr_ioa_cfg *ioa_cfg;
8438 struct Scsi_Host *host;
8439 unsigned long ipr_regs_pci;
8440 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -07008441 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -05008442 volatile u32 mask, uproc, interrupts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443
8444 ENTER;
8445
8446 if ((rc = pci_enable_device(pdev))) {
8447 dev_err(&pdev->dev, "Cannot enable adapter\n");
8448 goto out;
8449 }
8450
8451 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8452
8453 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8454
8455 if (!host) {
8456 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8457 rc = -ENOMEM;
8458 goto out_disable;
8459 }
8460
8461 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8462 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Brian King35a39692006-09-25 12:39:20 -05008463 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8464 sata_port_info.flags, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008465
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008466 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008467
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008468 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008469 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
 8470 dev_id->vendor, dev_id->device);
		rc = -ENODEV;
 8471 goto out_scsi_host_put;
8472 }
8473
Wayne Boyera32c0552010-02-19 13:23:36 -08008474 /* set SIS 32 or SIS 64 */
8475 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008476 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8477
Brian King5469cb52007-03-29 12:42:40 -05008478 if (ipr_transop_timeout)
8479 ioa_cfg->transop_timeout = ipr_transop_timeout;
8480 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8481 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8482 else
8483 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8484
Auke Kok44c10132007-06-08 15:46:36 -07008485 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -05008486
Linus Torvalds1da177e2005-04-16 15:20:36 -07008487 ipr_regs_pci = pci_resource_start(pdev, 0);
8488
8489 rc = pci_request_regions(pdev, IPR_NAME);
8490 if (rc < 0) {
8491 dev_err(&pdev->dev,
8492 "Couldn't register memory range of registers\n");
8493 goto out_scsi_host_put;
8494 }
8495
Arjan van de Ven25729a72008-09-28 16:18:02 -07008496 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008497
8498 if (!ipr_regs) {
8499 dev_err(&pdev->dev,
8500 "Couldn't map memory range of registers\n");
8501 rc = -ENOMEM;
8502 goto out_release_regions;
8503 }
8504
8505 ioa_cfg->hdw_dma_regs = ipr_regs;
8506 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8507 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8508
8509 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8510
8511 pci_set_master(pdev);
8512
Wayne Boyera32c0552010-02-19 13:23:36 -08008513 if (ioa_cfg->sis64) {
8514 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8515 if (rc < 0) {
8516 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8518 }
8519
8520 } else
8521 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8522
Linus Torvalds1da177e2005-04-16 15:20:36 -07008523 if (rc < 0) {
8524 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8525 goto cleanup_nomem;
8526 }
8527
8528 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8529 ioa_cfg->chip_cfg->cache_line_size);
8530
8531 if (rc != PCIBIOS_SUCCESSFUL) {
8532 dev_err(&pdev->dev, "Write of cache line size failed\n");
8533 rc = -EIO;
8534 goto cleanup_nomem;
8535 }
8536
Wayne Boyer95fecd92009-06-16 15:13:28 -07008537 /* Enable MSI style interrupts if they are supported. */
Wayne Boyer1be7bd82009-06-17 09:55:35 -07008538 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
Wayne Boyer95fecd92009-06-16 15:13:28 -07008539 rc = ipr_test_msi(ioa_cfg, pdev);
8540 if (rc == -EOPNOTSUPP)
8541 pci_disable_msi(pdev);
8542 else if (rc)
8543 goto out_msi_disable;
8544 else
8545 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8546 } else if (ipr_debug)
8547 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8548
Linus Torvalds1da177e2005-04-16 15:20:36 -07008549 /* Save away PCI config space for use following IOA reset */
8550 rc = pci_save_state(pdev);
8551
8552 if (rc != PCIBIOS_SUCCESSFUL) {
8553 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8554 rc = -EIO;
8555 goto cleanup_nomem;
8556 }
8557
8558 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8559 goto cleanup_nomem;
8560
8561 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8562 goto cleanup_nomem;
8563
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008564 if (ioa_cfg->sis64)
8565 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8566 + ((sizeof(struct ipr_config_table_entry64)
8567 * ioa_cfg->max_devs_supported)));
8568 else
8569 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8570 + ((sizeof(struct ipr_config_table_entry)
8571 * ioa_cfg->max_devs_supported)));
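	/*
	 * Sizing note: the config table is one header plus one entry per
	 * supported device, e.g. for SIS-64:
	 *
	 *   cfg_table_size = sizeof(struct ipr_config_table_hdr64) +
	 *                    max_devs_supported * sizeof(struct ipr_config_table_entry64)
	 *
	 * so a larger ipr_max_devs (clamped in ipr_init_ioa_cfg) directly
	 * grows this DMA allocation.
	 */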
8572
Linus Torvalds1da177e2005-04-16 15:20:36 -07008573 rc = ipr_alloc_mem(ioa_cfg);
8574 if (rc < 0) {
8575 dev_err(&pdev->dev,
8576 "Couldn't allocate enough memory for device driver!\n");
8577 goto cleanup_nomem;
8578 }
8579
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008580 /*
8581 * If HRRQ updated interrupt is not masked, or reset alert is set,
8582 * the card is in an unknown state and needs a hard reset
8583 */
Wayne Boyer214777b2010-02-19 13:24:26 -08008584 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8585 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8586 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008587 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8588 ioa_cfg->needs_hard_reset = 1;
Brian King473b1e82007-05-02 10:44:11 -05008589 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8590 ioa_cfg->needs_hard_reset = 1;
8591 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8592 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06008593
Linus Torvalds1da177e2005-04-16 15:20:36 -07008594 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008595 rc = request_irq(pdev->irq, ipr_isr,
8596 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8597 IPR_NAME, ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008598
8599 if (rc) {
8600 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8601 pdev->irq, rc);
8602 goto cleanup_nolog;
8603 }
8604
Brian King463fc692007-05-07 17:09:05 -05008605 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8606 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8607 ioa_cfg->needs_warm_reset = 1;
8608 ioa_cfg->reset = ipr_reset_slot_reset;
8609 } else
8610 ioa_cfg->reset = ipr_reset_start_bist;
8611
Linus Torvalds1da177e2005-04-16 15:20:36 -07008612 spin_lock(&ipr_driver_lock);
8613 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8614 spin_unlock(&ipr_driver_lock);
8615
8616 LEAVE;
8617out:
8618 return rc;
8619
8620cleanup_nolog:
8621 ipr_free_mem(ioa_cfg);
8622cleanup_nomem:
8623 iounmap(ipr_regs);
Wayne Boyer95fecd92009-06-16 15:13:28 -07008624out_msi_disable:
8625 pci_disable_msi(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008626out_release_regions:
8627 pci_release_regions(pdev);
8628out_scsi_host_put:
8629 scsi_host_put(host);
8630out_disable:
8631 pci_disable_device(pdev);
8632 goto out;
8633}
8634
8635/**
8636 * ipr_scan_vsets - Scans for VSET devices
8637 * @ioa_cfg: ioa config struct
8638 *
 8639 * Description: Since the VSET resources do not follow SAM (they can have
 8640 * sparse LUNs with no LUN 0), we have to scan for them ourselves.
8641 *
8642 * Return value:
8643 * none
8644 **/
8645static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8646{
8647 int target, lun;
8648
8649 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8650 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8651 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8652}
8653
8654/**
8655 * ipr_initiate_ioa_bringdown - Bring down an adapter
8656 * @ioa_cfg: ioa config struct
8657 * @shutdown_type: shutdown type
8658 *
8659 * Description: This function will initiate bringing down the adapter.
8660 * This consists of issuing an IOA shutdown to the adapter
8661 * to flush the cache, and running BIST.
8662 * If the caller needs to wait on the completion of the reset,
8663 * the caller must sleep on the reset_wait_q.
8664 *
8665 * Return value:
8666 * none
8667 **/
8668static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8669 enum ipr_shutdown_type shutdown_type)
8670{
8671 ENTER;
8672 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8673 ioa_cfg->sdt_state = ABORT_DUMP;
8674 ioa_cfg->reset_retries = 0;
8675 ioa_cfg->in_ioa_bringdown = 1;
8676 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8677 LEAVE;
8678}
8679
8680/**
8681 * __ipr_remove - Remove a single adapter
8682 * @pdev: pci device struct
8683 *
8684 * Adapter hot plug remove entry point.
8685 *
8686 * Return value:
8687 * none
8688 **/
8689static void __ipr_remove(struct pci_dev *pdev)
8690{
8691 unsigned long host_lock_flags = 0;
8692 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8693 ENTER;
8694
8695 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Brian King970ea292007-04-26 16:00:06 -05008696 while(ioa_cfg->in_reset_reload) {
8697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8698 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8699 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8700 }
8701
Linus Torvalds1da177e2005-04-16 15:20:36 -07008702 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8703
8704 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8705 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
brking@us.ibm.com 5cbf5ea2005-05-02 19:50:47 -05008706 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008707 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8708
8709 spin_lock(&ipr_driver_lock);
8710 list_del(&ioa_cfg->queue);
8711 spin_unlock(&ipr_driver_lock);
8712
8713 if (ioa_cfg->sdt_state == ABORT_DUMP)
8714 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8715 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8716
8717 ipr_free_all_resources(ioa_cfg);
8718
8719 LEAVE;
8720}
8721
8722/**
8723 * ipr_remove - IOA hot plug remove entry point
8724 * @pdev: pci device struct
8725 *
8726 * Adapter hot plug remove entry point.
8727 *
8728 * Return value:
8729 * none
8730 **/
Kleber S. Souzaf3816422009-04-22 10:50:28 -03008731static void __devexit ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008732{
8733 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8734
8735 ENTER;
8736
Tony Jonesee959b02008-02-22 00:13:36 +01008737 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +01008739 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008740 &ipr_dump_attr);
8741 scsi_remove_host(ioa_cfg->host);
8742
8743 __ipr_remove(pdev);
8744
8745 LEAVE;
8746}
8747
8748/**
 8749 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 8750 *
8751 * Return value:
8752 * 0 on success / non-zero on failure
8753 **/
8754static int __devinit ipr_probe(struct pci_dev *pdev,
8755 const struct pci_device_id *dev_id)
8756{
8757 struct ipr_ioa_cfg *ioa_cfg;
8758 int rc;
8759
8760 rc = ipr_probe_ioa(pdev, dev_id);
8761
8762 if (rc)
8763 return rc;
8764
8765 ioa_cfg = pci_get_drvdata(pdev);
8766 rc = ipr_probe_ioa_part2(ioa_cfg);
8767
8768 if (rc) {
8769 __ipr_remove(pdev);
8770 return rc;
8771 }
8772
8773 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8774
8775 if (rc) {
8776 __ipr_remove(pdev);
8777 return rc;
8778 }
8779
Tony Jonesee959b02008-02-22 00:13:36 +01008780 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008781 &ipr_trace_attr);
8782
8783 if (rc) {
8784 scsi_remove_host(ioa_cfg->host);
8785 __ipr_remove(pdev);
8786 return rc;
8787 }
8788
Tony Jonesee959b02008-02-22 00:13:36 +01008789 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008790 &ipr_dump_attr);
8791
8792 if (rc) {
Tony Jonesee959b02008-02-22 00:13:36 +01008793 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008794 &ipr_trace_attr);
8795 scsi_remove_host(ioa_cfg->host);
8796 __ipr_remove(pdev);
8797 return rc;
8798 }
8799
8800 scsi_scan_host(ioa_cfg->host);
8801 ipr_scan_vsets(ioa_cfg);
8802 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8803 ioa_cfg->allow_ml_add_del = 1;
brking@us.ibm.com11cd8f12005-11-01 17:00:11 -06008804 ioa_cfg->host->max_channel = IPR_VSET_BUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008805 schedule_work(&ioa_cfg->work_q);
8806 return 0;
8807}
8808
8809/**
8810 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07008811 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07008812 *
8813 * This function is invoked upon system shutdown/reboot. It will issue
8814 * an adapter shutdown to the adapter to flush the write cache.
8815 *
8816 * Return value:
8817 * none
8818 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -07008819static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008820{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	/* Wait for any in-flight reset/reload to finish before bringing the IOA down. */
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
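
/*
 * PCI IDs claimed by this driver.  The final (driver_data) field of each
 * entry carries per-adapter flags such as IPR_USE_LONG_TRANSOP_TIMEOUT
 * and IPR_USE_PCI_WARM_RESET.
 */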
static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
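
/*
 * PCI error recovery callbacks, invoked by the PCI error recovery
 * infrastructure (e.g. EEH on Power) when an error is detected on the
 * adapter's PCI slot.
 */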
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = __devexit_p(ipr_remove),
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock(&ipr_driver_lock);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		/* Send a shutdown prepare to each adapter still accepting commands. */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock(&ipr_driver_lock);

	return NOTIFY_OK;
}
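
/*
 * Reboot notifier, registered in ipr_init().  Lets each adapter prepare
 * for shutdown before the system restarts, halts, or powers off.
 */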
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);